Merge branch 'lightcli' into light-filters

Robert Habermeier 2017-04-03 12:55:31 +02:00
commit 60ce0aee1a
81 changed files with 5432 additions and 2449 deletions

Cargo.lock generated
View File

@@ -10,7 +10,6 @@ dependencies = [
 "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore 1.7.0",
-"ethcore-dapps 1.7.0",
 "ethcore-devtools 1.7.0",
 "ethcore-io 1.7.0",
 "ethcore-ipc 1.7.0",
@@ -24,16 +23,17 @@ dependencies = [
 "ethcore-signer 1.7.0",
 "ethcore-stratum 1.7.0",
 "ethcore-util 1.7.0",
+"ethkey 0.2.0",
 "ethsync 1.7.0",
 "evmbin 0.1.0",
 "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
 "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"parity-dapps 1.7.0",
 "parity-hash-fetch 1.7.0",
 "parity-ipfs-api 1.7.0",
 "parity-local-store 0.1.0",
@@ -41,6 +41,7 @@ dependencies = [
 "parity-rpc-client 1.4.0",
 "parity-updater 1.7.0",
 "path 0.1.0",
+"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "rlp 0.1.0",
 "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -310,6 +311,11 @@ dependencies = [
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "difference"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "docopt"
 version = "0.7.0"
@@ -362,7 +368,7 @@ dependencies = [
 [[package]]
 name = "ethabi"
-version = "1.0.0"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
 "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -393,7 +399,7 @@ dependencies = [
 "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
 "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethash 1.7.0",
 "ethcore-bloom-journal 0.1.0",
 "ethcore-devtools 1.7.0",
@@ -408,6 +414,7 @@ dependencies = [
 "ethkey 0.2.0",
 "ethstore 0.1.0",
 "evmjit 1.7.0",
+"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
 "hardware-wallet 1.7.0",
 "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
 "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -415,6 +422,7 @@ dependencies = [
 "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"native-contracts 0.1.0",
 "num 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -445,40 +453,6 @@ dependencies = [
 "siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
-[[package]]
-name = "ethcore-dapps"
-version = "1.7.0"
-dependencies = [
-"base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
-"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
-"ethcore-devtools 1.7.0",
-"ethcore-rpc 1.7.0",
-"ethcore-util 1.7.0",
-"fetch 0.1.0",
-"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
-"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
-"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
-"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"parity-hash-fetch 1.7.0",
-"parity-reactor 0.1.0",
-"parity-ui 1.7.0",
-"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
-"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
-"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
-"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
-"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
-]
 [[package]]
 name = "ethcore-devtools"
 version = "1.7.0"
@@ -642,6 +616,7 @@ dependencies = [
 "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
+"jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-reactor 0.1.0",
@@ -661,6 +636,7 @@ dependencies = [
 name = "ethcore-secretstore"
 version = "1.0.0"
 dependencies = [
+"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore-devtools 1.7.0",
 "ethcore-ipc 1.7.0",
 "ethcore-ipc-codegen 1.7.0",
@@ -668,9 +644,18 @@ dependencies = [
 "ethcore-util 1.7.0",
 "ethcrypto 0.1.0",
 "ethkey 0.2.0",
+"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -946,6 +931,14 @@ dependencies = [
 "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "heck"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 [[package]]
 name = "hidapi"
 version = "0.3.1"
@@ -1080,7 +1073,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "jsonrpc-core"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1092,7 +1085,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-http-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1105,7 +1098,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-ipc-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1117,17 +1110,31 @@ dependencies = [
 [[package]]
 name = "jsonrpc-macros"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "jsonrpc-minihttp-server"
+version = "7.0.0"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
+dependencies = [
+"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
+"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
+"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)",
+"tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)",
+"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 [[package]]
 name = "jsonrpc-pubsub"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1137,7 +1144,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-server-utils"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1148,7 +1155,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-tcp-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3"
 dependencies = [
 "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
 "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1396,6 +1403,25 @@ dependencies = [
 "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "native-contract-generator"
+version = "0.1.0"
+dependencies = [
+"ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"heck 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+[[package]]
+name = "native-contracts"
+version = "0.1.0"
+dependencies = [
+"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"ethcore-util 1.7.0",
+"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"native-contract-generator 0.1.0",
+]
 [[package]]
 name = "native-tls"
 version = "0.1.0"
@@ -1577,6 +1603,38 @@ dependencies = [
 "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "parity-dapps"
+version = "1.7.0"
+dependencies = [
+"base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
+"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"ethcore-devtools 1.7.0",
+"ethcore-util 1.7.0",
+"fetch 0.1.0",
+"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
+"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
+"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"parity-hash-fetch 1.7.0",
+"parity-reactor 0.1.0",
+"parity-ui 1.7.0",
+"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 [[package]]
 name = "parity-dapps-glue"
 version = "1.7.0"
@@ -1595,7 +1653,7 @@ dependencies = [
 name = "parity-hash-fetch"
 version = "1.7.0"
 dependencies = [
-"ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore-util 1.7.0",
 "fetch 0.1.0",
 "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1705,7 +1763,7 @@ dependencies = [
 name = "parity-updater"
 version = "1.7.0"
 dependencies = [
-"ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore 1.7.0",
 "ethcore-ipc 1.7.0",
 "ethcore-ipc-codegen 1.7.0",
@@ -1799,6 +1857,14 @@ name = "podio"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "pretty_assertions"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+"difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 [[package]]
 name = "primal"
 version = "0.2.3"
@@ -2404,6 +2470,21 @@ dependencies = [
 "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "tokio-minihttp"
+version = "0.1.0"
+source = "git+https://github.com/tomusdrw/tokio-minihttp#8acbafae3e77e7f7eb516b441ec84695580221dd"
+dependencies = [
+"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)",
+"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 [[package]]
 name = "tokio-named-pipes"
 version = "0.1.0"
@@ -2414,6 +2495,22 @@ dependencies = [
 "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+[[package]]
+name = "tokio-proto"
+version = "0.1.0"
+source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3"
+dependencies = [
+"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 [[package]]
 name = "tokio-proto"
 version = "0.1.0"
@@ -2505,6 +2602,11 @@ name = "unicode-normalization"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "unicode-segmentation"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "unicode-xid"
 version = "0.0.4"
@@ -2674,13 +2776,14 @@ dependencies = [
 "checksum ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "<none>"
 "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf"
 "checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf"
+"checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8"
 "checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8"
 "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f"
 "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91"
 "checksum elastic-array 0.6.0 (git+https://github.com/paritytech/elastic-array)" = "<none>"
 "checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83"
 "checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "<none>"
-"checksum ethabi 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f6cc4c1acd005f48e1d17b06a461adac8fb6eeeb331fbf19a0e656fba91cd"
+"checksum ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63df67d0af5e3cb906b667ca1a6e00baffbed87d0d8f5f78468a1f5eb3a66345"
 "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa"
 "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb"
 "checksum futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8e51e7f9c150ba7fd4cee9df8bf6ea3dea5b63b68955ddad19ccd35b71dcfb4d"
@@ -2690,6 +2793,7 @@ dependencies = [
 "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
 "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1"
 "checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c"
+"checksum heck 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f807d2f64cc044a6bcf250ff23e59be0deec7a16612c014f962a06fa7e020f9"
 "checksum hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)" = "<none>"
 "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae"
 "checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "<none>"
@@ -2706,6 +2810,7 @@ dependencies = [
 "checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
 "checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
 "checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
+"checksum jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
 "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
 "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
 "checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
@@ -2769,6 +2874,7 @@ dependencies = [
 "checksum phf_shared 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "fee4d039930e4f45123c9b15976cf93a499847b6483dc09c42ea0ec4940f2aa6"
 "checksum pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8cee804ecc7eaf201a4a207241472cc870e825206f6c031e3ee2a72fa425f2fa"
 "checksum podio 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e5422a1ee1bc57cc47ae717b0137314258138f38fd5f3cea083f43a9725383a0"
+"checksum pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2412f3332a07c7a2a50168988dcc184f32180a9758ad470390e5f55e089f6b6e"
 "checksum primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0e31b86efadeaeb1235452171a66689682783149a6249ff334a2c5d8218d00a4"
 "checksum primal-bit 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "464a91febc06166783d4f5ba3577b5ed8dda8e421012df80bfe48a971ed7be8f"
 "checksum primal-check 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "647c81b67bb9551a7b88d0bcd785ac35b7d0bf4b2f358683d7c2375d04daec51"
@@ -2838,7 +2944,9 @@ dependencies = [
 "checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b"
 "checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473"
 "checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "<none>"
+"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "<none>"
 "checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "<none>"
+"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "<none>"
 "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808"
 "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162"
 "checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a"
@@ -2850,6 +2958,7 @@ dependencies = [
 "checksum unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "13a5906ca2b98c799f4b1ab4557b76367ebd6ae5ef14930ec841c74aed5f3764"
 "checksum unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c1f7ceb96afdfeedee42bade65a0d585a6a0106f681b6749c8ff4daa8df30b3f"
 "checksum unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "26643a2f83bac55f1976fb716c10234485f9202dcd65cfbdf9da49867b271172"
+"checksum unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18127285758f0e2c6cf325bb3f3d138a12fee27de4f23e146cd6a179f26c2cf3"
 "checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc"
 "checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91"
 "checksum untrusted 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "193df64312e3515fd983ded55ad5bcaa7647a035804828ed757e832ce6029ef3"

View File

@@ -26,7 +26,6 @@ app_dirs = "1.1.1"
 futures = "0.1"
 fdlimit = "0.1"
 ws2_32-sys = "0.2"
-hyper = { default-features = false, git = "https://github.com/paritytech/hyper" }
 ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
 jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
 ethsync = { path = "sync" }
@@ -42,6 +41,7 @@ ethcore-ipc-hypervisor = { path = "ipc/hypervisor" }
 ethcore-light = { path = "ethcore/light" }
 ethcore-logger = { path = "logger" }
 ethcore-stratum = { path = "stratum" }
+ethkey = { path = "ethkey" }
 evmbin = { path = "evmbin" }
 rlp = { path = "util/rlp" }
 rpc-cli = { path = "rpc_cli" }
@@ -51,8 +51,9 @@ parity-ipfs-api = { path = "ipfs" }
 parity-updater = { path = "updater" }
 parity-reactor = { path = "util/reactor" }
 parity-local-store = { path = "local-store" }
-ethcore-dapps = { path = "dapps", optional = true }
 path = { path = "util/path" }
+parity-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.103", optional = true}
 ethcore-secretstore = { path = "secret_store", optional = true }
@@ -61,6 +62,7 @@ rustc_version = "0.2"
 [dev-dependencies]
 ethcore-ipc-tests = { path = "ipc/tests" }
+pretty_assertions = "0.1"
 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"
@@ -72,18 +74,18 @@ daemonize = "0.2"
 default = ["ui-precompiled"]
 ui = [
 "dapps",
-"ethcore-dapps/ui",
+"parity-dapps/ui",
 "ethcore-signer/ui",
 ]
 ui-precompiled = [
 "dapps",
 "ethcore-signer/ui-precompiled",
-"ethcore-dapps/ui-precompiled",
+"parity-dapps/ui-precompiled",
 ]
-dapps = ["ethcore-dapps"]
+dapps = ["parity-dapps"]
 ipc = ["ethcore/ipc", "ethsync/ipc"]
 jit = ["ethcore/jit"]
-dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
+dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"]
 json-tests = ["ethcore/json-tests"]
 test-heavy = ["ethcore/test-heavy"]
 ethkey-cli = ["ethcore/ethkey-cli"]

View File

@@ -1,6 +1,6 @@
 [package]
 description = "Parity Dapps crate"
-name = "ethcore-dapps"
+name = "parity-dapps"
 version = "1.7.0"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
@@ -28,11 +28,8 @@ zip = { version = "0.1", default-features = false }
 jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
 jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
-# TODO [ToDr] Temporary solution, server should be merged with RPC.
-jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
 ethcore-devtools = { path = "../devtools" }
-ethcore-rpc = { path = "../rpc" }
 ethcore-util = { path = "../util" }
 fetch = { path = "../util/fetch" }
 parity-hash-fetch = { path = "../hash-fetch" }
@@ -42,7 +39,7 @@ parity-ui = { path = "./ui" }
 clippy = { version = "0.0.103", optional = true}
 [features]
-dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"]
+dev = ["clippy", "ethcore-util/dev"]
 ui = ["parity-ui/no-precompiled-js"]
 ui-precompiled = ["parity-ui/use-precompiled-js"]

View File

@@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
-use std::sync::Arc;
 use unicase::UniCase;
 use hyper::{server, net, Decoder, Encoder, Next, Control};
 use hyper::header;
@@ -26,48 +25,49 @@ use apps::fetcher::Fetcher;
 use handlers::extract_url;
 use endpoint::{Endpoint, Endpoints, Handler, EndpointPath};
-use jsonrpc_http_server;
-use jsonrpc_server_utils::cors;
+use jsonrpc_http_server::{self, AccessControlAllowOrigin};
 #[derive(Clone)]
-pub struct RestApi {
-cors_domains: Option<Vec<cors::AccessControlAllowOrigin>>,
-endpoints: Arc<Endpoints>,
-fetcher: Arc<Fetcher>,
+pub struct RestApi<F> {
+// TODO [ToDr] cors_domains should be handled by the server to avoid duplicated logic.
+// RequestMiddleware should be able to tell that cors headers should be included.
+cors_domains: Option<Vec<AccessControlAllowOrigin>>,
+apps: Vec<App>,
+fetcher: F,
 }
-impl RestApi {
-pub fn new(cors_domains: Vec<cors::AccessControlAllowOrigin>, endpoints: Arc<Endpoints>, fetcher: Arc<Fetcher>) -> Box<Endpoint> {
+impl<F: Fetcher + Clone> RestApi<F> {
+pub fn new(cors_domains: Vec<AccessControlAllowOrigin>, endpoints: &Endpoints, fetcher: F) -> Box<Endpoint> {
 Box::new(RestApi {
 cors_domains: Some(cors_domains),
-endpoints: endpoints,
+apps: Self::list_apps(endpoints),
 fetcher: fetcher,
 })
 }
-fn list_apps(&self) -> Vec<App> {
-self.endpoints.iter().filter_map(|(ref k, ref e)| {
+fn list_apps(endpoints: &Endpoints) -> Vec<App> {
+endpoints.iter().filter_map(|(ref k, ref e)| {
 e.info().map(|ref info| App::from_info(k, info))
 }).collect()
 }
 }
-impl Endpoint for RestApi {
+impl<F: Fetcher + Clone> Endpoint for RestApi<F> {
 fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box<Handler> {
-Box::new(RestApiRouter::new(self.clone(), path, control))
+Box::new(RestApiRouter::new((*self).clone(), path, control))
 }
 }
-struct RestApiRouter {
-api: RestApi,
+struct RestApiRouter<F> {
+api: RestApi<F>,
 cors_header: Option<header::AccessControlAllowOrigin>,
 path: Option<EndpointPath>,
 control: Option<Control>,
 handler: Box<Handler>,
 }
-impl RestApiRouter {
-fn new(api: RestApi, path: EndpointPath, control: Control) -> Self {
+impl<F: Fetcher> RestApiRouter<F> {
+fn new(api: RestApi<F>, path: EndpointPath, control: Control) -> Self {
 RestApiRouter {
 path: Some(path),
 cors_header: None,
@@ -114,7 +114,7 @@ impl RestApiRouter {
 }
 }
-impl server::Handler<net::HttpStream> for RestApiRouter {
+impl<F: Fetcher> server::Handler<net::HttpStream> for RestApiRouter<F> {
 fn on_request(&mut self, request: server::Request<net::HttpStream>) -> Next {
 self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into();
@@ -142,7 +142,7 @@ impl server::Handler<net::HttpStream> for RestApiRouter {
 if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() }
 let handler = endpoint.and_then(|v| match v {
-"apps" => Some(response::as_json(&self.api.list_apps())),
+"apps" => Some(response::as_json(&self.api.apps)),
 "ping" => Some(response::ping()),
 "content" => self.resolve_content(hash, path, control),
 _ => None
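
The api.rs hunk above makes RestApi generic over its fetcher and has it precompute the app list from &Endpoints at construction, instead of holding an Arc<Endpoints> and rebuilding the list on every /api/apps request. A minimal, self-contained analogue of that precomputation is sketched below; Endpoints and App here are simplified stand-ins, not the dapps crate's real types.

use std::collections::BTreeMap;

// Simplified stand-ins for the dapps crate's Endpoints map and App metadata.
type Endpoints = BTreeMap<String, Option<String>>;

#[derive(Clone, Debug)]
struct App {
    id: String,
    info: String,
}

#[derive(Clone)]
struct RestApi {
    // Computed once at construction; every request handler just reads it.
    apps: Vec<App>,
}

impl RestApi {
    fn new(endpoints: &Endpoints) -> Self {
        let apps = endpoints
            .iter()
            // Endpoints without info are skipped, mirroring the filter_map in list_apps.
            .filter_map(|(id, info)| info.as_ref().map(|info| App {
                id: id.clone(),
                info: info.clone(),
            }))
            .collect();
        RestApi { apps }
    }
}

fn main() {
    let mut endpoints = Endpoints::new();
    endpoints.insert("wallet".to_string(), Some("Parity Wallet".to_string()));
    endpoints.insert("proxy".to_string(), None);

    let api = RestApi::new(&endpoints);
    println!("{:?}", api.apps); // only the endpoint with info survives
}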

View File

@@ -47,7 +47,8 @@ pub trait Fetcher: Send + Sync + 'static {
 fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler>;
 }
-pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + Send + Sync + 'static = URLHintContract> {
+#[derive(Clone)]
+pub struct ContentFetcher<F: Fetch + Clone = FetchClient, R: URLHint + Clone + 'static = URLHintContract> {
 dapps_path: PathBuf,
 resolver: R,
 cache: Arc<Mutex<ContentCache>>,
@@ -57,14 +58,14 @@ pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + Send + Sync + 'st
 fetch: F,
 }
-impl<R: URLHint + Send + Sync + 'static, F: Fetch> Drop for ContentFetcher<F, R> {
+impl<R: URLHint + Clone + 'static, F: Fetch + Clone> Drop for ContentFetcher<F, R> {
 fn drop(&mut self) {
 // Clear cache path
 let _ = fs::remove_dir_all(&self.dapps_path);
 }
 }
-impl<R: URLHint + Send + Sync + 'static, F: Fetch> ContentFetcher<F, R> {
+impl<R: URLHint + Clone + 'static, F: Fetch + Clone> ContentFetcher<F, R> {
 pub fn new(resolver: R, sync_status: Arc<SyncStatus>, embeddable_on: Option<(String, u16)>, remote: Remote, fetch: F) -> Self {
 let mut dapps_path = env::temp_dir();
@@ -97,7 +98,7 @@ impl<R: URLHint + Send + Sync + 'static, F: Fetch> ContentFetcher<F, R> {
 }
 }
-impl<R: URLHint + Send + Sync + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
+impl<R: URLHint + Clone + 'static, F: Fetch + Clone> Fetcher for ContentFetcher<F, R> {
 fn contains(&self, content_id: &str) -> bool {
 {
 let mut cache = self.cache.lock();
@@ -233,6 +234,7 @@ mod tests {
 use page::LocalPageEndpoint;
 use super::{ContentFetcher, Fetcher};
+#[derive(Clone)]
 struct FakeResolver;
 impl URLHint for FakeResolver {
 fn resolve(&self, _id: Bytes) -> Option<URLHintResult> {
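
The fetcher hunk above derives Clone for ContentFetcher and relaxes the URLHint/Fetch bounds to Clone, which works because the fetcher's mutable state (such as the content cache) already sits behind an Arc<Mutex<..>>: cloning the fetcher clones a handle to the same cache, not the cache itself. A small sketch under that assumption, with stand-in types:

use std::sync::{Arc, Mutex};

// Stand-in for the real ContentFetcher: the shared cache handle is the only state here.
#[derive(Clone)]
struct ContentFetcher {
    cache: Arc<Mutex<Vec<String>>>, // shared by every clone of the fetcher
}

fn main() {
    let fetcher = ContentFetcher { cache: Arc::new(Mutex::new(Vec::new())) };
    let clone = fetcher.clone();

    // Writing through one handle is visible through the other.
    fetcher.cache.lock().unwrap().push("dapp-content-hash".to_string());
    assert_eq!(clone.cache.lock().unwrap().len(), 1);
    println!("entries seen through the clone: {}", clone.cache.lock().unwrap().len());
}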

View File

@@ -16,9 +16,10 @@
 //! URL Endpoint traits
-use hyper::{self, server, net};
 use std::collections::BTreeMap;
+use hyper::{self, server, net};
 #[derive(Debug, PartialEq, Default, Clone)]
 pub struct EndpointPath {
 pub app_id: String,

View File

@@ -1,44 +0,0 @@
-// Copyright 2015-2017 Parity Technologies (UK) Ltd.
-// This file is part of Parity.
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Authorization Handlers
-use hyper::{server, Decoder, Encoder, Next};
-use hyper::net::HttpStream;
-use hyper::status::StatusCode;
-pub struct AuthRequiredHandler;
-impl server::Handler<HttpStream> for AuthRequiredHandler {
-fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
-Next::write()
-}
-fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
-Next::write()
-}
-fn on_response(&mut self, res: &mut server::Response) -> Next {
-res.set_status(StatusCode::Unauthorized);
-res.headers_mut().set_raw("WWW-Authenticate", vec![b"Basic realm=\"Parity\"".to_vec()]);
-Next::write()
-}
-fn on_response_writable(&mut self, _encoder: &mut Encoder<HttpStream>) -> Next {
-Next::end()
-}
-}

View File

@@ -16,14 +16,12 @@
 //! Hyper handlers implementations.
-mod auth;
 mod content;
 mod echo;
 mod fetch;
 mod redirect;
 mod streaming;
-pub use self::auth::AuthRequiredHandler;
 pub use self::content::ContentHandler;
 pub use self::echo::EchoHandler;
 pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse};

View File

@ -34,9 +34,7 @@ extern crate zip;
extern crate jsonrpc_core; extern crate jsonrpc_core;
extern crate jsonrpc_http_server; extern crate jsonrpc_http_server;
extern crate jsonrpc_server_utils;
extern crate ethcore_rpc;
extern crate ethcore_util as util; extern crate ethcore_util as util;
extern crate fetch; extern crate fetch;
extern crate parity_dapps_glue as parity_dapps; extern crate parity_dapps_glue as parity_dapps;
@ -61,7 +59,6 @@ mod apps;
mod page; mod page;
mod router; mod router;
mod handlers; mod handlers;
mod rpc;
mod api; mod api;
mod proxypac; mod proxypac;
mod url; mod url;
@ -69,23 +66,16 @@ mod web;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
use std::path::{Path, PathBuf}; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use std::net::SocketAddr;
use std::collections::HashMap; use std::collections::HashMap;
use jsonrpc_core::{Middleware, MetaIoHandler}; use jsonrpc_http_server::{self as http, hyper, AccessControlAllowOrigin};
use jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote;
pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin};
pub use jsonrpc_http_server::hyper;
use ethcore_rpc::Metadata; use fetch::Fetch;
use fetch::{Fetch, Client as FetchClient};
use hash_fetch::urlhint::ContractClient;
use parity_reactor::Remote; use parity_reactor::Remote;
use router::auth::{Authorization, NoAuth, HttpBasicAuth};
use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; pub use hash_fetch::urlhint::ContractClient;
/// Indicates sync status /// Indicates sync status
pub trait SyncStatus: Send + Sync { pub trait SyncStatus: Send + Sync {
@ -107,173 +97,76 @@ impl<F> WebProxyTokens for F where F: Fn(String) -> bool + Send + Sync {
fn is_web_proxy_token_valid(&self, token: &str) -> bool { self(token.to_owned()) } fn is_web_proxy_token_valid(&self, token: &str) -> bool { self(token.to_owned()) }
} }
/// Webapps HTTP+RPC server build. /// Dapps server as `jsonrpc-http-server` request middleware.
pub struct ServerBuilder<T: Fetch = FetchClient> { pub struct Middleware<F: Fetch + Clone> {
router: router::Router<apps::fetcher::ContentFetcher<F>>,
}
impl<F: Fetch + Clone> Middleware<F> {
/// Creates new Dapps server middleware.
pub fn new(
remote: Remote,
signer_address: Option<(String, u16)>,
dapps_path: PathBuf, dapps_path: PathBuf,
extra_dapps: Vec<PathBuf>, extra_dapps: Vec<PathBuf>,
registrar: Arc<ContractClient>, registrar: Arc<ContractClient>,
sync_status: Arc<SyncStatus>, sync_status: Arc<SyncStatus>,
web_proxy_tokens: Arc<WebProxyTokens>, web_proxy_tokens: Arc<WebProxyTokens>,
signer_address: Option<(String, u16)>, fetch: F,
allowed_hosts: Option<Vec<Host>>, ) -> Self {
extra_cors: Option<Vec<AccessControlAllowOrigin>>, let content_fetcher = apps::fetcher::ContentFetcher::new(
remote: Remote, hash_fetch::urlhint::URLHintContract::new(registrar),
fetch: Option<T>, sync_status,
} signer_address.clone(),
remote.clone(),
fetch.clone(),
);
let endpoints = apps::all_endpoints(
dapps_path,
extra_dapps,
signer_address.clone(),
web_proxy_tokens,
remote.clone(),
fetch.clone(),
);
impl ServerBuilder { let cors_domains = cors_domains(signer_address.clone());
/// Construct new dapps server
pub fn new<P: AsRef<Path>>(dapps_path: P, registrar: Arc<ContractClient>, remote: Remote) -> Self { let special = {
ServerBuilder { let mut special = HashMap::new();
dapps_path: dapps_path.as_ref().to_owned(), special.insert(router::SpecialEndpoint::Rpc, None);
extra_dapps: vec![], special.insert(router::SpecialEndpoint::Utils, Some(apps::utils()));
registrar: registrar, special.insert(
sync_status: Arc::new(|| false), router::SpecialEndpoint::Api,
web_proxy_tokens: Arc::new(|_| false), Some(api::RestApi::new(cors_domains.clone(), &endpoints, content_fetcher.clone())),
signer_address: None, );
allowed_hosts: Some(vec![]), special
extra_cors: None, };
remote: remote,
fetch: None, let router = router::Router::new(
signer_address,
content_fetcher,
endpoints,
special,
);
Middleware {
router: router,
} }
} }
} }
impl<T: Fetch> ServerBuilder<T> { impl<F: Fetch + Clone> http::RequestMiddleware for Middleware<F> {
/// Set a fetch client to use. fn on_request(&self, req: &hyper::server::Request<hyper::net::HttpStream>, control: &hyper::Control) -> http::RequestMiddlewareAction {
pub fn fetch<X: Fetch>(self, fetch: X) -> ServerBuilder<X> { self.router.on_request(req, control)
ServerBuilder {
dapps_path: self.dapps_path,
extra_dapps: vec![],
registrar: self.registrar,
sync_status: self.sync_status,
web_proxy_tokens: self.web_proxy_tokens,
signer_address: self.signer_address,
allowed_hosts: self.allowed_hosts,
extra_cors: self.extra_cors,
remote: self.remote,
fetch: Some(fetch),
}
}
/// Change default sync status.
pub fn sync_status(mut self, status: Arc<SyncStatus>) -> Self {
self.sync_status = status;
self
}
/// Change default web proxy tokens validator.
pub fn web_proxy_tokens(mut self, tokens: Arc<WebProxyTokens>) -> Self {
self.web_proxy_tokens = tokens;
self
}
/// Change default signer port.
pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self {
self.signer_address = signer_address;
self
}
/// Change allowed hosts.
/// `None` - All hosts are allowed
/// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address)
pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation<Host>) -> Self {
self.allowed_hosts = allowed_hosts.into();
self
}
/// Extra cors headers.
/// `None` - no additional CORS URLs
pub fn extra_cors_headers(mut self, cors: DomainsValidation<AccessControlAllowOrigin>) -> Self {
self.extra_cors = cors.into();
self
}
/// Change extra dapps paths (apart from `dapps_path`)
pub fn extra_dapps<P: AsRef<Path>>(mut self, extra_dapps: &[P]) -> Self {
self.extra_dapps = extra_dapps.iter().map(|p| p.as_ref().to_owned()).collect();
self
}
/// Asynchronously start server with no authentication,
/// returns result with `Server` handle on success or an error.
pub fn start_unsecured_http<S: Middleware<Metadata>>(self, addr: &SocketAddr, handler: MetaIoHandler<Metadata, S>, tokio_remote: TokioRemote) -> Result<Server, ServerError> {
let fetch = self.fetch_client()?;
Server::start_http(
addr,
self.allowed_hosts,
self.extra_cors,
NoAuth,
handler,
self.dapps_path,
self.extra_dapps,
self.signer_address,
self.registrar,
self.sync_status,
self.web_proxy_tokens,
self.remote,
tokio_remote,
fetch,
)
}
/// Asynchronously start server with `HTTP Basic Authentication`,
/// return result with `Server` handle on success or an error.
pub fn start_basic_auth_http<S: Middleware<Metadata>>(self, addr: &SocketAddr, username: &str, password: &str, handler: MetaIoHandler<Metadata, S>, tokio_remote: TokioRemote) -> Result<Server, ServerError> {
let fetch = self.fetch_client()?;
Server::start_http(
addr,
self.allowed_hosts,
self.extra_cors,
HttpBasicAuth::single_user(username, password),
handler,
self.dapps_path,
self.extra_dapps,
self.signer_address,
self.registrar,
self.sync_status,
self.web_proxy_tokens,
self.remote,
tokio_remote,
fetch,
)
}
fn fetch_client(&self) -> Result<T, ServerError> {
match self.fetch.clone() {
Some(fetch) => Ok(fetch),
None => T::new().map_err(|_| ServerError::FetchInitialization),
}
} }
} }
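// A minimal sketch (not part of this change): how the new `Middleware` is meant to be
// mounted. It mirrors the test-helper `Server::start_http` further down in this diff and
// assumes the imports already present in this file (`jsonrpc_http_server as http`,
// `jsonrpc_core::IoHandler`, `parity_reactor::Remote`, `fetch::Fetch`, etc.); all
// concrete values are placeholders.
fn attach_dapps<F: Fetch + Clone>(
	io: IoHandler,
	remote: Remote,
	dapps_path: PathBuf,
	registrar: Arc<ContractClient>,
	fetch: F,
) -> Result<http::Server, http::Error> {
	// Build the dapps request middleware; sync status and web-proxy tokens are stubbed.
	let middleware = Middleware::new(
		remote,
		None,                 // no signer address in this sketch
		dapps_path,
		vec![],               // no extra dapp directories
		registrar,
		Arc::new(|| false),   // SyncStatus: report "not syncing"
		Arc::new(|_| false),  // WebProxyTokens: reject all tokens
		fetch,
	);
	// Mount it on a jsonrpc HTTP server; every request goes through the router first.
	http::ServerBuilder::new(io)
		.request_middleware(middleware)
		.start_http(&"127.0.0.1:8545".parse().expect("valid socket address"))
}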
/// Webapps HTTP server. /// Returns a list of CORS domains for API endpoint.
pub struct Server { fn cors_domains(signer_address: Option<(String, u16)>) -> Vec<AccessControlAllowOrigin> {
server: Option<hyper::server::Listening>, use self::apps::{HOME_PAGE, DAPPS_DOMAIN};
}
impl Server { match signer_address {
/// Returns a list of allowed hosts or `None` if all hosts are allowed.
fn allowed_hosts(hosts: Option<Vec<Host>>, bind_address: String) -> Option<Vec<Host>> {
let mut allowed = Vec::new();
match hosts {
Some(hosts) => allowed.extend_from_slice(&hosts),
None => return None,
}
// Add localhost domain as valid too if listening on loopback interface.
allowed.push(bind_address.replace("127.0.0.1", "localhost").into());
allowed.push(bind_address.into());
Some(allowed)
}
/// Returns a list of CORS domains for API endpoint.
fn cors_domains(
signer_address: Option<(String, u16)>,
extra_cors: Option<Vec<AccessControlAllowOrigin>>,
) -> Vec<AccessControlAllowOrigin> {
let basic_cors = match signer_address {
Some(signer_address) => [ Some(signer_address) => [
format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN),
format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1),
@ -283,118 +176,11 @@ impl Server {
format!("https://{}", address(&signer_address)), format!("https://{}", address(&signer_address)),
].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(),
None => vec![], None => vec![],
};
match extra_cors {
None => basic_cors,
Some(extra_cors) => basic_cors.into_iter().chain(extra_cors).collect(),
}
}
fn start_http<A: Authorization + 'static, F: Fetch, T: Middleware<Metadata>>(
addr: &SocketAddr,
hosts: Option<Vec<Host>>,
extra_cors: Option<Vec<AccessControlAllowOrigin>>,
authorization: A,
handler: MetaIoHandler<Metadata, T>,
dapps_path: PathBuf,
extra_dapps: Vec<PathBuf>,
signer_address: Option<(String, u16)>,
registrar: Arc<ContractClient>,
sync_status: Arc<SyncStatus>,
web_proxy_tokens: Arc<WebProxyTokens>,
remote: Remote,
tokio_remote: TokioRemote,
fetch: F,
) -> Result<Server, ServerError> {
let authorization = Arc::new(authorization);
let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(
hash_fetch::urlhint::URLHintContract::new(registrar),
sync_status,
signer_address.clone(),
remote.clone(),
fetch.clone(),
));
let endpoints = Arc::new(apps::all_endpoints(
dapps_path,
extra_dapps,
signer_address.clone(),
web_proxy_tokens,
remote.clone(),
fetch.clone(),
));
let cors_domains = Self::cors_domains(signer_address.clone(), extra_cors);
let special = Arc::new({
let mut special = HashMap::new();
special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, tokio_remote, cors_domains.clone()));
special.insert(router::SpecialEndpoint::Utils, apps::utils());
special.insert(
router::SpecialEndpoint::Api,
api::RestApi::new(cors_domains, endpoints.clone(), content_fetcher.clone())
);
special
});
let hosts = Self::allowed_hosts(hosts, format!("{}", addr));
hyper::Server::http(addr)?
.handle(move |ctrl| router::Router::new(
ctrl,
signer_address.clone(),
content_fetcher.clone(),
endpoints.clone(),
special.clone(),
authorization.clone(),
hosts.clone(),
))
.map(|(l, srv)| {
::std::thread::spawn(move || {
srv.run();
});
Server {
server: Some(l),
}
})
.map_err(ServerError::from)
}
#[cfg(test)]
/// Returns address that this server is bound to.
pub fn addr(&self) -> &SocketAddr {
self.server.as_ref()
.expect("server is always Some at the start; it's consumed only when object is dropped; qed")
.addrs()
.first()
.expect("You cannot start the server without binding to at least one address; qed")
} }
} }
impl Drop for Server { fn address(address: &(String, u16)) -> String {
fn drop(&mut self) { format!("{}:{}", address.0, address.1)
self.server.take().unwrap().close()
}
}
/// Webapp Server startup error
#[derive(Debug)]
pub enum ServerError {
/// Wrapped `std::io::Error`
IoError(std::io::Error),
/// Other `hyper` error
Other(hyper::error::Error),
/// Fetch service initialization error
FetchInitialization,
}
impl From<hyper::error::Error> for ServerError {
fn from(err: hyper::error::Error) -> Self {
match err {
hyper::error::Error::Io(e) => ServerError::IoError(e),
e => ServerError::Other(e),
}
}
} }
/// Random filename /// Random filename
@ -404,39 +190,18 @@ fn random_filename() -> String {
rng.gen_ascii_chars().take(12).collect() rng.gen_ascii_chars().take(12).collect()
} }
fn address(address: &(String, u16)) -> String {
format!("{}:{}", address.0, address.1)
}
#[cfg(test)] #[cfg(test)]
mod util_tests { mod util_tests {
use super::Server; use super::cors_domains;
use jsonrpc_http_server::AccessControlAllowOrigin; use jsonrpc_http_server::AccessControlAllowOrigin;
#[test]
fn should_return_allowed_hosts() {
// given
let bind_address = "127.0.0.1".to_owned();
// when
let all = Server::allowed_hosts(None, bind_address.clone());
let address = Server::allowed_hosts(Some(Vec::new()), bind_address.clone());
let some = Server::allowed_hosts(Some(vec!["ethcore.io".into()]), bind_address.clone());
// then
assert_eq!(all, None);
assert_eq!(address, Some(vec!["localhost".into(), "127.0.0.1".into()]));
assert_eq!(some, Some(vec!["ethcore.io".into(), "localhost".into(), "127.0.0.1".into()]));
}
#[test] #[test]
fn should_return_cors_domains() { fn should_return_cors_domains() {
// given // given
// when // when
let none = Server::cors_domains(None, None); let none = cors_domains(None);
let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180)), None); let some = cors_domains(Some(("127.0.0.1".into(), 18180)));
let extra = Server::cors_domains(None, Some(vec!["all".into()]));
// then // then
assert_eq!(none, Vec::<AccessControlAllowOrigin>::new()); assert_eq!(none, Vec::<AccessControlAllowOrigin>::new());
@ -448,6 +213,5 @@ mod util_tests {
"https://parity.web3.site:18180".into(), "https://parity.web3.site:18180".into(),
"https://127.0.0.1:18180".into(), "https://127.0.0.1:18180".into(),
]); ]);
assert_eq!(extra, vec![AccessControlAllowOrigin::Any]);
} }
} }

View File

@ -15,24 +15,20 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Router implementation //! Router implementation
//! Processes request handling authorization and dispatching it to proper application. //! Dispatches requests to the proper application.
pub mod auth;
mod host_validation;
use address; use address;
use std::cmp; use std::cmp;
use std::sync::Arc;
use std::collections::HashMap; use std::collections::HashMap;
use url::{Url, Host}; use url::{Url, Host};
use hyper::{self, server, header, Next, Encoder, Decoder, Control, StatusCode}; use hyper::{self, server, header, Control, StatusCode};
use hyper::net::HttpStream; use hyper::net::HttpStream;
use jsonrpc_server_utils::hosts; use jsonrpc_http_server as http;
use apps::{self, DAPPS_DOMAIN}; use apps::{self, DAPPS_DOMAIN};
use apps::fetcher::Fetcher; use apps::fetcher::Fetcher;
use endpoint::{Endpoint, Endpoints, EndpointPath}; use endpoint::{Endpoint, Endpoints, EndpointPath, Handler};
use handlers::{self, Redirection, ContentHandler}; use handlers::{self, Redirection, ContentHandler};
/// Special endpoints are accessible on every domain (every dapp) /// Special endpoints are accessible on every domain (every dapp)
@ -44,51 +40,29 @@ pub enum SpecialEndpoint {
None, None,
} }
pub struct Router<A: auth::Authorization + 'static> { pub struct Router<F> {
control: Option<Control>,
signer_address: Option<(String, u16)>, signer_address: Option<(String, u16)>,
endpoints: Arc<Endpoints>, endpoints: Endpoints,
fetch: Arc<Fetcher>, fetch: F,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>, special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
authorization: Arc<A>,
allowed_hosts: Option<Vec<hosts::Host>>,
handler: Box<server::Handler<HttpStream> + Send>,
} }
impl<A: auth::Authorization + 'static> server::Handler<HttpStream> for Router<A> { impl<F: Fetcher + 'static> http::RequestMiddleware for Router<F> {
fn on_request(&self, req: &server::Request<HttpStream>, control: &Control) -> http::RequestMiddlewareAction {
fn on_request(&mut self, req: server::Request<HttpStream>) -> Next {
// Choose proper handler depending on path / domain // Choose proper handler depending on path / domain
let url = handlers::extract_url(&req); let url = handlers::extract_url(req);
let endpoint = extract_endpoint(&url); let endpoint = extract_endpoint(&url);
let referer = extract_referer_endpoint(&req); let referer = extract_referer_endpoint(req);
let is_utils = endpoint.1 == SpecialEndpoint::Utils; let is_utils = endpoint.1 == SpecialEndpoint::Utils;
let is_dapps_domain = endpoint.0.as_ref().map(|endpoint| endpoint.using_dapps_domains).unwrap_or(false);
let is_origin_set = req.headers().get::<http::hyper::header::Origin>().is_some();
let is_get_request = *req.method() == hyper::Method::Get; let is_get_request = *req.method() == hyper::Method::Get;
trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req);
// Validate Host header let control = control.clone();
trace!(target: "dapps", "Validating host headers against: {:?}", self.allowed_hosts);
let is_valid = is_utils || host_validation::is_valid(&req, &self.allowed_hosts);
if !is_valid {
debug!(target: "dapps", "Rejecting invalid host header.");
self.handler = host_validation::host_invalid_response();
return self.handler.on_request(req);
}
trace!(target: "dapps", "Checking authorization.");
// Check authorization
let auth = self.authorization.is_authorized(&req);
if let auth::Authorized::No(handler) = auth {
debug!(target: "dapps", "Authorization denied.");
self.handler = handler;
return self.handler.on_request(req);
}
let control = self.control.take().expect("on_request is called only once; control is always defined at start; qed");
debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint);
self.handler = match (endpoint.0, endpoint.1, referer) { let handler: Option<Box<Handler>> = match (endpoint.0, endpoint.1, referer) {
// Handle invalid web requests that we can recover from // Handle invalid web requests that we can recover from
(ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url)))
if referer.app_id == apps::WEB_PATH if referer.app_id == apps::WEB_PATH
@ -100,26 +74,27 @@ impl<A: auth::Authorization + 'static> server::Handler<HttpStream> for Router<A>
let len = cmp::min(referer_url.path.len(), 2); // /web/<encoded>/ let len = cmp::min(referer_url.path.len(), 2); // /web/<encoded>/
let base = referer_url.path[..len].join("/"); let base = referer_url.path[..len].join("/");
let requested = url.map(|u| u.path.join("/")).unwrap_or_default(); let requested = url.map(|u| u.path.join("/")).unwrap_or_default();
Redirection::boxed(&format!("/{}/{}", base, requested)) Some(Redirection::boxed(&format!("/{}/{}", base, requested)))
}, },
// First check special endpoints // First check special endpoints
(ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => {
trace!(target: "dapps", "Resolving to special endpoint."); trace!(target: "dapps", "Resolving to special endpoint.");
self.special.get(endpoint) self.special.get(endpoint)
.expect("special known to contain key; qed") .expect("special known to contain key; qed")
.to_async_handler(path.clone().unwrap_or_default(), control) .as_ref()
.map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control))
}, },
// Then delegate to dapp // Then delegate to dapp
(Some(ref path), _, _) if self.endpoints.contains_key(&path.app_id) => { (Some(ref path), _, _) if self.endpoints.contains_key(&path.app_id) => {
trace!(target: "dapps", "Resolving to local/builtin dapp."); trace!(target: "dapps", "Resolving to local/builtin dapp.");
self.endpoints.get(&path.app_id) Some(self.endpoints.get(&path.app_id)
.expect("endpoints known to contain key; qed") .expect("endpoints known to contain key; qed")
.to_async_handler(path.clone(), control) .to_async_handler(path.clone(), control))
}, },
// Try to resolve and fetch the dapp // Try to resolve and fetch the dapp
(Some(ref path), _, _) if self.fetch.contains(&path.app_id) => { (Some(ref path), _, _) if self.fetch.contains(&path.app_id) => {
trace!(target: "dapps", "Resolving to fetchable content."); trace!(target: "dapps", "Resolving to fetchable content.");
self.fetch.to_async_handler(path.clone(), control) Some(self.fetch.to_async_handler(path.clone(), control))
}, },
// NOTE [todr] /home is redirected to home page since some users may have the redirection cached // NOTE [todr] /home is redirected to home page since some users may have the redirection cached
// (in the past we used 301 instead of 302) // (in the past we used 301 instead of 302)
@ -128,82 +103,61 @@ impl<A: auth::Authorization + 'static> server::Handler<HttpStream> for Router<A>
// 404 for non-existent content // 404 for non-existent content
(Some(ref path), _, _) if is_get_request && path.app_id != "home" => { (Some(ref path), _, _) if is_get_request && path.app_id != "home" => {
trace!(target: "dapps", "Resolving to 404."); trace!(target: "dapps", "Resolving to 404.");
Box::new(ContentHandler::error( Some(Box::new(ContentHandler::error(
StatusCode::NotFound, StatusCode::NotFound,
"404 Not Found", "404 Not Found",
"Requested content was not found.", "Requested content was not found.",
None, None,
self.signer_address.clone(), self.signer_address.clone(),
)) )))
}, },
// Redirect any other GET request to signer. // Redirect any other GET request to signer.
_ if is_get_request => { _ if is_get_request => {
if let Some(ref signer_address) = self.signer_address { if let Some(ref signer_address) = self.signer_address {
trace!(target: "dapps", "Redirecting to signer interface."); trace!(target: "dapps", "Redirecting to signer interface.");
Redirection::boxed(&format!("http://{}", address(signer_address))) Some(Redirection::boxed(&format!("http://{}", address(signer_address))))
} else { } else {
trace!(target: "dapps", "Signer disabled, returning 404."); trace!(target: "dapps", "Signer disabled, returning 404.");
Box::new(ContentHandler::error( Some(Box::new(ContentHandler::error(
StatusCode::NotFound, StatusCode::NotFound,
"404 Not Found", "404 Not Found",
"Your homepage is not available when Trusted Signer is disabled.", "Your homepage is not available when Trusted Signer is disabled.",
Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."),
self.signer_address.clone(), self.signer_address.clone(),
)) )))
} }
}, },
// RPC by default // RPC by default
_ => { _ => {
trace!(target: "dapps", "Resolving to RPC call."); trace!(target: "dapps", "Resolving to RPC call.");
self.special.get(&SpecialEndpoint::Rpc) None
.expect("RPC endpoint always stored; qed")
.to_async_handler(EndpointPath::default(), control)
} }
}; };
// Delegate on_request to proper handler match handler {
self.handler.on_request(req) Some(handler) => http::RequestMiddlewareAction::Respond {
should_validate_hosts: !(is_utils || is_dapps_domain),
handler: handler,
},
None => http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: !is_origin_set,
},
} }
/// This event occurs each time the `Request` is ready to be read from.
fn on_request_readable(&mut self, decoder: &mut Decoder<HttpStream>) -> Next {
self.handler.on_request_readable(decoder)
}
/// This event occurs after the first time this handled signals `Next::write()`.
fn on_response(&mut self, response: &mut server::Response) -> Next {
self.handler.on_response(response)
}
/// This event occurs each time the `Response` is ready to be written to.
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
self.handler.on_response_writable(encoder)
} }
} }
impl<A: auth::Authorization> Router<A> { impl<F> Router<F> {
pub fn new( pub fn new(
control: Control,
signer_address: Option<(String, u16)>, signer_address: Option<(String, u16)>,
content_fetcher: Arc<Fetcher>, content_fetcher: F,
endpoints: Arc<Endpoints>, endpoints: Endpoints,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>, special: HashMap<SpecialEndpoint, Option<Box<Endpoint>>>,
authorization: Arc<A>,
allowed_hosts: Option<Vec<hosts::Host>>,
) -> Self { ) -> Self {
let handler = special.get(&SpecialEndpoint::Utils)
.expect("Utils endpoint always stored; qed")
.to_handler(EndpointPath::default());
Router { Router {
control: Some(control),
signer_address: signer_address, signer_address: signer_address,
endpoints: endpoints, endpoints: endpoints,
fetch: content_fetcher, fetch: content_fetcher,
special: special, special: special,
authorization: authorization,
allowed_hosts: allowed_hosts,
handler: handler,
} }
} }
} }
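// A quick orientation on the resulting control flow (illustration only; the paths come
// from tests elsewhere in this diff, hosts and ports are examples):
//
//   POST /rpc/        -> SpecialEndpoint::Rpc is stored as None, so the router returns
//                        Proceed and the underlying jsonrpc-http-server answers the RPC.
//   GET  /api/ping    -> SpecialEndpoint::Api, served by the REST API handler.
//   GET  /<dapp>/...  -> local/builtin dapp endpoint if known, else fetchable content,
//                        else the 404 / redirect-to-signer fallbacks from the match above.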

View File

@ -1,106 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! HTTP Authorization implementations
use std::collections::HashMap;
use hyper::{server, net, header, status};
use endpoint::Handler;
use handlers::{AuthRequiredHandler, ContentHandler};
/// Authorization result
pub enum Authorized {
/// Authorization was successful.
Yes,
/// Unsuccessful authorization. Handler for further work is returned.
No(Box<Handler>),
}
/// Authorization interface
pub trait Authorization : Send + Sync {
/// Checks if authorization is valid.
fn is_authorized(&self, req: &server::Request<net::HttpStream>)-> Authorized;
}
/// HTTP Basic Authorization handler
pub struct HttpBasicAuth {
users: HashMap<String, String>,
}
/// No-authorization implementation (authorization disabled)
pub struct NoAuth;
impl Authorization for NoAuth {
fn is_authorized(&self, _req: &server::Request<net::HttpStream>)-> Authorized {
Authorized::Yes
}
}
impl Authorization for HttpBasicAuth {
fn is_authorized(&self, req: &server::Request<net::HttpStream>) -> Authorized {
let auth = self.check_auth(&req);
match auth {
Access::Denied => {
Authorized::No(Box::new(ContentHandler::error(
status::StatusCode::Unauthorized,
"Unauthorized",
"You need to provide valid credentials to access this page.",
None,
None,
)))
},
Access::AuthRequired => {
Authorized::No(Box::new(AuthRequiredHandler))
},
Access::Granted => {
Authorized::Yes
},
}
}
}
#[derive(Debug)]
enum Access {
Granted,
Denied,
AuthRequired,
}
impl HttpBasicAuth {
/// Creates `HttpBasicAuth` instance with only one user.
pub fn single_user(username: &str, password: &str) -> Self {
let mut users = HashMap::new();
users.insert(username.to_owned(), password.to_owned());
HttpBasicAuth {
users: users
}
}
fn is_authorized(&self, username: &str, password: &str) -> bool {
self.users.get(&username.to_owned()).map_or(false, |pass| pass == password)
}
fn check_auth(&self, req: &server::Request<net::HttpStream>) -> Access {
match req.headers().get::<header::Authorization<header::Basic>>() {
Some(&header::Authorization(
header::Basic { ref username, password: Some(ref password) }
)) if self.is_authorized(username, password) => Access::Granted,
Some(_) => Access::Denied,
None => Access::AuthRequired,
}
}
}

View File

@ -1,42 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use apps::DAPPS_DOMAIN;
use hyper::{server, header, StatusCode};
use hyper::net::HttpStream;
use handlers::ContentHandler;
use jsonrpc_http_server;
use jsonrpc_server_utils::hosts;
pub fn is_valid(req: &server::Request<HttpStream>, allowed_hosts: &Option<Vec<hosts::Host>>) -> bool {
let header_valid = jsonrpc_http_server::is_host_allowed(req, allowed_hosts);
match (header_valid, req.headers().get::<header::Host>()) {
(true, _) => true,
(_, Some(host)) => host.hostname.ends_with(DAPPS_DOMAIN),
_ => false,
}
}
pub fn host_invalid_response() -> Box<server::Handler<HttpStream> + Send> {
Box::new(ContentHandler::error(StatusCode::Forbidden,
"Current Host Is Disallowed",
"You are trying to access your node using incorrect address.",
Some("Use allowed URL or specify different <code>hosts</code> CLI options."),
None,
))
}

View File

@ -66,14 +66,14 @@ impl<T: Middleware<Metadata>> Endpoint for RpcEndpoint<T> {
#[derive(Default)] #[derive(Default)]
struct NoopMiddleware; struct NoopMiddleware;
impl http::RequestMiddleware for NoopMiddleware { impl http::RequestMiddleware for NoopMiddleware {
fn on_request(&self, request: &http::hyper::server::Request<http::hyper::net::HttpStream>) -> http::RequestMiddlewareAction { fn on_request(&self, request: &http::hyper::server::Request<http::hyper::net::HttpStream>, _control: &http::hyper::Control) -> http::RequestMiddlewareAction {
http::RequestMiddlewareAction::Proceed { http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: request.headers().get::<http::hyper::header::Origin>().is_none(), should_continue_on_invalid_cors: request.headers().get::<http::hyper::header::Origin>().is_none(),
} }
} }
} }
struct MetadataExtractor; pub struct MetadataExtractor;
impl HttpMetaExtractor<Metadata> for MetadataExtractor { impl HttpMetaExtractor<Metadata> for MetadataExtractor {
fn read_metadata(&self, request: &http::hyper::server::Request<http::hyper::net::HttpStream>) -> Metadata { fn read_metadata(&self, request: &http::hyper::server::Request<http::hyper::net::HttpStream>) -> Metadata {
let dapp_id = request.headers().get::<http::hyper::header::Origin>() let dapp_id = request.headers().get::<http::hyper::header::Origin>()

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve, serve_with_registrar, serve_extra_cors, request, assert_security_headers}; use tests::helpers::{serve, serve_with_registrar, request, assert_security_headers};
#[test] #[test]
fn should_return_error() { fn should_return_error() {
@ -195,26 +195,3 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_port() {
response.assert_status("HTTP/1.1 200 OK"); response.assert_status("HTTP/1.1 200 OK");
response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180");
} }
#[test]
fn should_return_extra_cors_headers() {
// given
let server = serve_extra_cors(Some(vec!["all".to_owned()]));
// when
let response = request(server,
"\
POST /api/ping HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Origin: http://somedomain.io\r\n\
Connection: close\r\n\
\r\n\
{}
"
);
// then
response.assert_status("HTTP/1.1 200 OK");
response.assert_header("Access-Control-Allow-Origin", "http://somedomain.io");
}

View File

@ -1,80 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve_with_auth, request, assert_security_headers_for_embed};
#[test]
fn should_require_authorization() {
// given
let server = serve_with_auth("test", "test");
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned());
assert_eq!(response.headers.get(0).unwrap(), "WWW-Authenticate: Basic realm=\"Parity\"");
}
#[test]
fn should_reject_on_invalid_auth() {
// given
let server = serve_with_auth("test", "test");
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned());
assert!(response.body.contains("Unauthorized"), response.body);
assert_eq!(response.headers_raw.contains("WWW-Authenticate"), false);
}
#[test]
fn should_allow_on_valid_auth() {
// given
let server = serve_with_auth("Aladdin", "OpenSesame");
// when
let response = request(server,
"\
GET /ui/ HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
assert_security_headers_for_embed(&response.headers);
}

View File

@ -16,18 +16,20 @@
use std::env; use std::env;
use std::str; use std::str;
use std::ops::Deref; use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use env_logger::LogBuilder; use env_logger::LogBuilder;
use ethcore_rpc::Metadata; use jsonrpc_core::IoHandler;
use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::{self as http, Host, DomainsValidation};
use ServerBuilder;
use Server;
use fetch::Fetch;
use devtools::http_client; use devtools::http_client;
use hash_fetch::urlhint::ContractClient;
use fetch::{Fetch, Client as FetchClient};
use parity_reactor::{EventLoop, Remote}; use parity_reactor::{EventLoop, Remote};
use {Middleware, SyncStatus, WebProxyTokens};
mod registrar; mod registrar;
mod fetch; mod fetch;
@ -50,7 +52,7 @@ pub struct ServerLoop {
pub event_loop: EventLoop, pub event_loop: EventLoop,
} }
impl Deref for ServerLoop { impl ::std::ops::Deref for ServerLoop {
type Target = Server; type Target = Server;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
@ -58,7 +60,7 @@ impl Deref for ServerLoop {
} }
} }
pub fn init_server<F, B>(process: F, io: MetaIoHandler<Metadata>, remote: Remote) -> (ServerLoop, Arc<FakeRegistrar>) where pub fn init_server<F, B>(process: F, io: IoHandler, remote: Remote) -> (ServerLoop, Arc<FakeRegistrar>) where
F: FnOnce(ServerBuilder) -> ServerBuilder<B>, F: FnOnce(ServerBuilder) -> ServerBuilder<B>,
B: Fetch, B: Fetch,
{ {
@ -74,33 +76,15 @@ pub fn init_server<F, B>(process: F, io: MetaIoHandler<Metadata>, remote: Remote
&dapps_path, registrar.clone(), remote, &dapps_path, registrar.clone(), remote,
)) ))
.signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)))
.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io, event_loop.raw_remote()).unwrap(); .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap();
( (
ServerLoop { server: server, event_loop: event_loop }, ServerLoop { server: server, event_loop: event_loop },
registrar, registrar,
) )
} }
pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { pub fn serve_with_rpc(io: IoHandler) -> ServerLoop {
init_logger(); init_server(|builder| builder, io, Remote::new_sync()).0
let registrar = Arc::new(FakeRegistrar::new());
let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let event_loop = EventLoop::spawn();
let io = MetaIoHandler::default();
let server = ServerBuilder::new(&dapps_path, registrar, event_loop.remote())
.signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)))
.allowed_hosts(None.into())
.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, io, event_loop.raw_remote()).unwrap();
ServerLoop {
server: server,
event_loop: event_loop,
}
}
pub fn serve_with_rpc(io: MetaIoHandler<Metadata>) -> ServerLoop {
init_server(|builder| builder.allowed_hosts(None.into()), io, Remote::new_sync()).0
} }
pub fn serve_hosts(hosts: Option<Vec<String>>) -> ServerLoop { pub fn serve_hosts(hosts: Option<Vec<String>>) -> ServerLoop {
@ -108,20 +92,13 @@ pub fn serve_hosts(hosts: Option<Vec<String>>) -> ServerLoop {
init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0
} }
pub fn serve_extra_cors(extra_cors: Option<Vec<String>>) -> ServerLoop {
let extra_cors = extra_cors.map(|cors| cors.into_iter().map(Into::into).collect());
init_server(|builder| builder.allowed_hosts(None.into()).extra_cors_headers(extra_cors.into()), Default::default(), Remote::new_sync()).0
}
pub fn serve_with_registrar() -> (ServerLoop, Arc<FakeRegistrar>) { pub fn serve_with_registrar() -> (ServerLoop, Arc<FakeRegistrar>) {
init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()) init_server(|builder| builder, Default::default(), Remote::new_sync())
} }
pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc<FakeRegistrar>) { pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc<FakeRegistrar>) {
init_server(|builder| { init_server(|builder| {
builder builder.sync_status(Arc::new(|| true))
.sync_status(Arc::new(|| true))
.allowed_hosts(None.into())
}, Default::default(), Remote::new_sync()) }, Default::default(), Remote::new_sync())
} }
@ -133,7 +110,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv
let fetch = FakeFetch::default(); let fetch = FakeFetch::default();
let f = fetch.clone(); let f = fetch.clone();
let (server, reg) = init_server(move |builder| { let (server, reg) = init_server(move |builder| {
builder.allowed_hosts(None.into()).fetch(f.clone()) builder.fetch(f.clone())
}, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() });
(server, fetch, reg) (server, fetch, reg)
@ -144,7 +121,6 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) {
let f = fetch.clone(); let f = fetch.clone();
let (server, _) = init_server(move |builder| { let (server, _) = init_server(move |builder| {
builder builder
.allowed_hosts(None.into())
.fetch(f.clone()) .fetch(f.clone())
.web_proxy_tokens(Arc::new(move |token| &token == web_token)) .web_proxy_tokens(Arc::new(move |token| &token == web_token))
}, Default::default(), Remote::new_sync()); }, Default::default(), Remote::new_sync());
@ -153,7 +129,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) {
} }
pub fn serve() -> ServerLoop { pub fn serve() -> ServerLoop {
init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()).0 init_server(|builder| builder, Default::default(), Remote::new_sync()).0
} }
pub fn request(server: ServerLoop, request: &str) -> http_client::Response { pub fn request(server: ServerLoop, request: &str) -> http_client::Response {
@ -166,3 +142,157 @@ pub fn assert_security_headers(headers: &[String]) {
pub fn assert_security_headers_for_embed(headers: &[String]) { pub fn assert_security_headers_for_embed(headers: &[String]) {
http_client::assert_security_headers_present(headers, Some(SIGNER_PORT)) http_client::assert_security_headers_present(headers, Some(SIGNER_PORT))
} }
/// Webapps HTTP+RPC server build.
pub struct ServerBuilder<T: Fetch = FetchClient> {
dapps_path: PathBuf,
registrar: Arc<ContractClient>,
sync_status: Arc<SyncStatus>,
web_proxy_tokens: Arc<WebProxyTokens>,
signer_address: Option<(String, u16)>,
allowed_hosts: DomainsValidation<Host>,
remote: Remote,
fetch: Option<T>,
}
impl ServerBuilder {
/// Construct new dapps server
pub fn new<P: AsRef<Path>>(dapps_path: P, registrar: Arc<ContractClient>, remote: Remote) -> Self {
ServerBuilder {
dapps_path: dapps_path.as_ref().to_owned(),
registrar: registrar,
sync_status: Arc::new(|| false),
web_proxy_tokens: Arc::new(|_| false),
signer_address: None,
allowed_hosts: DomainsValidation::Disabled,
remote: remote,
fetch: None,
}
}
}
impl<T: Fetch> ServerBuilder<T> {
/// Set a fetch client to use.
pub fn fetch<X: Fetch>(self, fetch: X) -> ServerBuilder<X> {
ServerBuilder {
dapps_path: self.dapps_path,
registrar: self.registrar,
sync_status: self.sync_status,
web_proxy_tokens: self.web_proxy_tokens,
signer_address: self.signer_address,
allowed_hosts: self.allowed_hosts,
remote: self.remote,
fetch: Some(fetch),
}
}
/// Change default sync status.
pub fn sync_status(mut self, status: Arc<SyncStatus>) -> Self {
self.sync_status = status;
self
}
/// Change default web proxy tokens validator.
pub fn web_proxy_tokens(mut self, tokens: Arc<WebProxyTokens>) -> Self {
self.web_proxy_tokens = tokens;
self
}
/// Change default signer port.
pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self {
self.signer_address = signer_address;
self
}
/// Change allowed hosts.
/// `None` - All hosts are allowed
/// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address)
pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation<Host>) -> Self {
self.allowed_hosts = allowed_hosts;
self
}
/// Asynchronously start the server with no authentication;
/// returns a `Server` handle on success or an error.
pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result<Server, http::Error> {
let fetch = self.fetch_client();
Server::start_http(
addr,
io,
self.allowed_hosts,
self.signer_address,
self.dapps_path,
vec![],
self.registrar,
self.sync_status,
self.web_proxy_tokens,
self.remote,
fetch,
)
}
fn fetch_client(&self) -> T {
match self.fetch.clone() {
Some(fetch) => fetch,
None => T::new().unwrap(),
}
}
}
/// Webapps HTTP server.
pub struct Server {
server: Option<http::Server>,
}
impl Server {
fn start_http<F: Fetch>(
addr: &SocketAddr,
io: IoHandler,
allowed_hosts: DomainsValidation<Host>,
signer_address: Option<(String, u16)>,
dapps_path: PathBuf,
extra_dapps: Vec<PathBuf>,
registrar: Arc<ContractClient>,
sync_status: Arc<SyncStatus>,
web_proxy_tokens: Arc<WebProxyTokens>,
remote: Remote,
fetch: F,
) -> Result<Server, http::Error> {
let middleware = Middleware::new(
remote,
signer_address,
dapps_path,
extra_dapps,
registrar,
sync_status,
web_proxy_tokens,
fetch,
);
http::ServerBuilder::new(io)
.request_middleware(middleware)
.allowed_hosts(allowed_hosts)
.cors(http::DomainsValidation::Disabled)
.start_http(addr)
.map(|server| Server {
server: Some(server),
})
}
/// Returns address that this server is bound to.
pub fn addr(&self) -> &SocketAddr {
self.server.as_ref()
.expect("server is always Some at the start; it's consumed only when object is dropped; qed")
.addrs()
.first()
.expect("You cannot start the server without binding to at least one address; qed")
}
}
impl Drop for Server {
fn drop(&mut self) {
self.server.take().unwrap().close()
}
}

View File

@ -19,7 +19,6 @@
mod helpers; mod helpers;
mod api; mod api;
mod authorization;
mod fetch; mod fetch;
mod redirection; mod redirection;
mod rpc; mod rpc;

View File

@ -14,16 +14,14 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use futures::{future, Future}; use jsonrpc_core::{IoHandler, Value};
use ethcore_rpc::{Metadata, Origin};
use jsonrpc_core::{MetaIoHandler, Value};
use tests::helpers::{serve_with_rpc, request}; use tests::helpers::{serve_with_rpc, request};
#[test] #[test]
fn should_serve_rpc() { fn should_serve_rpc() {
// given // given
let mut io = MetaIoHandler::default(); let mut io = IoHandler::default();
io.add_method("rpc_test", |_| { io.add_method("rpc_test", |_| {
Ok(Value::String("Hello World!".into())) Ok(Value::String("Hello World!".into()))
}); });
@ -49,70 +47,3 @@ fn should_serve_rpc() {
response.assert_status("HTTP/1.1 200 OK"); response.assert_status("HTTP/1.1 200 OK");
assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned());
} }
#[test]
fn should_extract_metadata() {
// given
let mut io = MetaIoHandler::default();
io.add_method_with_meta("rpc_test", |_params, meta: Metadata| {
assert_eq!(meta.origin, Origin::Dapps("".into()));
assert_eq!(meta.dapp_id(), "".into());
future::ok(Value::String("Hello World!".into())).boxed()
});
let server = serve_with_rpc(io);
// when
let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#;
let response = request(server, &format!(
"\
POST /rpc/ HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
X-Parity-Origin: https://this.should.be.ignored\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}\r\n\
",
req.as_bytes().len(),
req,
));
// then
response.assert_status("HTTP/1.1 200 OK");
assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned());
}
#[test]
fn should_extract_metadata_from_custom_header() {
// given
let mut io = MetaIoHandler::default();
io.add_method_with_meta("rpc_test", |_params, meta: Metadata| {
assert_eq!(meta.origin, Origin::Dapps("https://parity.io/".into()));
assert_eq!(meta.dapp_id(), "https://parity.io/".into());
future::ok(Value::String("Hello World!".into())).boxed()
});
let server = serve_with_rpc(io);
// when
let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#;
let response = request(server, &format!(
"\
POST /rpc/ HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
Origin: null\r\n\
X-Parity-Origin: https://parity.io/\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}\r\n\
",
req.as_bytes().len(),
req,
));
// then
response.assert_status("HTTP/1.1 200 OK");
assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned());
}

View File

@ -34,7 +34,7 @@ fn should_reject_invalid_host() {
// then // then
assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned());
assert!(response.body.contains("Current Host Is Disallowed"), response.body); assert!(response.body.contains("Provided Host header is not whitelisted."), response.body);
} }
#[test] #[test]
@ -97,31 +97,3 @@ fn should_allow_parity_utils_even_on_invalid_domain() {
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
} }
#[test]
fn should_not_return_cors_headers_for_rpc() {
// given
let server = serve_hosts(Some(vec!["localhost:8080".into()]));
// when
let response = request(server,
"\
POST /rpc HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Origin: null\r\n\
Content-Type: application/json\r\n\
Connection: close\r\n\
\r\n\
{}
"
);
// then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
assert!(
!response.headers_raw.contains("Access-Control-Allow-Origin"),
"CORS headers were not expected: {:?}",
response.headers
);
}

View File

@ -8,7 +8,10 @@ RUN apt-get update && \
curl \ curl \
git \ git \
file \ file \
binutils binutils \
libssl-dev \
pkg-config \
libudev-dev
# install rustup # install rustup
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y RUN curl https://sh.rustup.rs -sSf | sh -s -- -y

View File

@ -11,44 +11,46 @@ build = "build.rs"
"ethcore-ipc-codegen" = { path = "../ipc/codegen" } "ethcore-ipc-codegen" = { path = "../ipc/codegen" }
[dependencies] [dependencies]
log = "0.3"
env_logger = "0.4"
rustc-serialize = "0.3"
rust-crypto = "0.2.34"
num_cpus = "1.2"
crossbeam = "0.2.9"
lazy_static = "0.2"
bloomchain = "0.1"
semver = "0.6"
bit-set = "0.4" bit-set = "0.4"
time = "0.1" bloomchain = "0.1"
rand = "0.3"
byteorder = "1.0"
transient-hashmap = "0.4"
linked-hash-map = "0.3.0"
lru-cache = "0.1.0"
itertools = "0.5"
ethabi = "1.0.0"
evmjit = { path = "../evmjit", optional = true }
clippy = { version = "0.0.103", optional = true}
ethash = { path = "../ethash" }
ethcore-util = { path = "../util" }
ethcore-io = { path = "../util/io" }
ethcore-devtools = { path = "../devtools" }
ethjson = { path = "../json" }
ethcore-ipc = { path = "../ipc/rpc" }
ethstore = { path = "../ethstore" }
ethkey = { path = "../ethkey" }
ethcore-ipc-nano = { path = "../ipc/nano" }
rlp = { path = "../util/rlp" }
ethcore-stratum = { path = "../stratum" }
ethcore-bloom-journal = { path = "../util/bloom" }
hardware-wallet = { path = "../hw" }
ethcore-logger = { path = "../logger" }
stats = { path = "../util/stats" }
hyper = { git = "https://github.com/paritytech/hyper", default-features = false }
num = "0.1"
bn = { git = "https://github.com/paritytech/bn" } bn = { git = "https://github.com/paritytech/bn" }
byteorder = "1.0"
clippy = { version = "0.0.103", optional = true}
crossbeam = "0.2.9"
env_logger = "0.4"
ethabi = "1.0.0"
ethash = { path = "../ethash" }
ethcore-bloom-journal = { path = "../util/bloom" }
ethcore-devtools = { path = "../devtools" }
ethcore-io = { path = "../util/io" }
ethcore-ipc = { path = "../ipc/rpc" }
ethcore-ipc-nano = { path = "../ipc/nano" }
ethcore-logger = { path = "../logger" }
ethcore-stratum = { path = "../stratum" }
ethcore-util = { path = "../util" }
ethjson = { path = "../json" }
ethkey = { path = "../ethkey" }
ethstore = { path = "../ethstore" }
evmjit = { path = "../evmjit", optional = true }
futures = "0.1"
hardware-wallet = { path = "../hw" }
hyper = { git = "https://github.com/paritytech/hyper", default-features = false }
itertools = "0.5"
lazy_static = "0.2"
linked-hash-map = "0.3.0"
log = "0.3"
lru-cache = "0.1.0"
native-contracts = { path = "native_contracts" }
num = "0.1"
num_cpus = "1.2"
rand = "0.3"
rlp = { path = "../util/rlp" }
rust-crypto = "0.2.34"
rustc-serialize = "0.3"
semver = "0.6"
stats = { path = "../util/stats" }
time = "0.1"
transient-hashmap = "0.4"
[features] [features]
jit = ["evmjit"] jit = ["evmjit"]

View File

@ -0,0 +1,15 @@
[package]
name = "native-contracts"
description = "Generated Rust code for Ethereum contract ABIs"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"
[dependencies]
ethabi = "1.0"
futures = "0.1"
byteorder = "1.0"
ethcore-util = { path = "../../util" }
[build-dependencies]
native-contract-generator = { path = "generator" }

View File

@ -0,0 +1,40 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate native_contract_generator;
use std::path::Path;
use std::fs::File;
use std::io::Write;
// TODO: `include!` these from files where they're pretty-printed?
const REGISTRY_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","ty
pe":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}]"#;
const SERVICE_TRANSACTION_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}]"#;
fn build_file(name: &str, abi: &str, filename: &str) {
let code = ::native_contract_generator::generate_module(name, abi).unwrap();
let out_dir = ::std::env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join(filename);
let mut f = File::create(&dest_path).unwrap();
f.write_all(code.as_bytes()).unwrap();
}
fn main() {
build_file("Registry", REGISTRY_ABI, "registry.rs");
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
}
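// build.rs writes the generated modules into OUT_DIR; presumably the crate's lib.rs then
// pulls them in with the usual include! idiom. A hedged sketch of that glue (the module
// layout and the `ethcore_util as util` rename are assumptions, not shown in this diff):
extern crate byteorder;
extern crate ethabi;
extern crate ethcore_util as util;
extern crate futures;

// Each generated file defines one struct (`Registry` / `ServiceTransactionChecker`)
// plus its ABI constant, so a wrapping module keeps the names tidy.
mod registry {
	include!(concat!(env!("OUT_DIR"), "/registry.rs"));
}
mod service_transaction {
	include!(concat!(env!("OUT_DIR"), "/service_transaction.rs"));
}

pub use self::registry::Registry;
pub use self::service_transaction::ServiceTransactionChecker;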

View File

@ -0,0 +1,9 @@
[package]
name = "native-contract-generator"
description = "Generates Rust code for ethereum contract ABIs"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethabi = "1.0.2"
heck = "0.2"

View File

@ -0,0 +1,346 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Rust code contract generator.
//! The code generated will require dependencies on the `ethcore-util`,
//! `ethabi`, `byteorder`, and `futures` crates.
//! This currently isn't hygienic, so compilation of generated code may fail
//! due to missing crates or name collisions. This will change when
//! it can be ported to a procedural macro.
use ethabi::Contract;
use ethabi::spec::{Interface, ParamType, Error as AbiError};
use heck::SnakeCase;
extern crate ethabi;
extern crate heck;
/// Errors in generation.
#[derive(Debug)]
pub enum Error {
/// Bad ABI.
Abi(AbiError),
/// Unsupported parameter type in given function.
UnsupportedType(String, ParamType),
}
/// Given an ABI string, generate code for a Rust module containing
/// a struct which can be used to call it.
// TODO: make this a proc macro when that's possible.
pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
let contract = Contract::new(Interface::load(abi.as_bytes()).map_err(Error::Abi)?);
let functions = generate_functions(&contract)?;
Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, BoxFuture}};
use ethabi::{{Contract, Interface, Token}};
use util::{{self, Uint}};
pub struct {name} {{
contract: Contract,
/// Address to make calls to.
pub address: util::Address,
}}
const ABI: &'static str = r#"{abi_str}"#;
impl {name} {{
/// Create a new instance of `{name}` with an address.
/// Calls can be made, given a callback for dispatching calls asynchronously.
pub fn new(address: util::Address) -> Self {{
let contract = Contract::new(Interface::load(ABI.as_bytes())
.expect("ABI checked at generation-time; qed"));
{name} {{
contract: contract,
address: address,
}}
}}
{functions}
}}
"##,
name = struct_name,
abi_str = abi,
functions = functions,
))
}
// generate function bodies from the ABI.
fn generate_functions(contract: &Contract) -> Result<String, Error> {
let mut functions = String::new();
for function in contract.functions() {
let name = function.name();
let snake_name = name.to_snake_case();
let inputs = function.input_params();
let outputs = function.output_params();
let (input_params, to_tokens) = input_params_codegen(&inputs)
.map_err(|bad_type| Error::UnsupportedType(name.into(), bad_type))?;
let (output_type, decode_outputs) = output_params_codegen(&outputs)
.map_err(|bad_type| Error::UnsupportedType(name.into(), bad_type))?;
functions.push_str(&format!(r##"
/// Call the function "{abi_name}" on the contract.
/// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where F: Fn(util::Address, Vec<u8>) -> U, U: Future<Item=Vec<u8>, Error=String> + Send + 'static
{{
let function = self.contract.function(r#"{abi_name}"#.to_string())
.expect("function existence checked at compile-time; qed");
let call_addr = self.address;
let call_future = match function.encode_call({to_tokens}) {{
Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return future::err(format!("Error encoding call: {{:?}}", e)).boxed(),
}};
call_future
.and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e)))
.map(::std::collections::VecDeque::from)
.and_then(|mut outputs| {decode_outputs})
.boxed()
}}
"##,
abi_name = name,
abi_inputs = inputs,
abi_outputs = outputs,
snake_name = snake_name,
params = input_params,
output_type = output_type,
to_tokens = to_tokens,
decode_outputs = decode_outputs,
))
}
Ok(functions)
}
// generate code for params in function signature and turning them into tokens.
//
// two pieces of code are generated: the first gives input types for the function signature,
// and the second gives code to tokenize those inputs.
//
// params of form `param_0: type_0, param_1: type_1, ...`
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
//
// returns any unsupported param type encountered.
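// for instance, a single `ParamType::Address` input yields (roughly):
//   params:    "param_0: util::Address, "
//   to_tokens: "{ let mut tokens = Vec::new();tokens.push({ Token::Address(param_0.0) }); tokens }"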
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut params = String::new();
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
for (index, param_type) in inputs.iter().enumerate() {
let param_name = format!("param_{}", index);
let rust_type = rust_type(param_type.clone())?;
let (needs_mut, tokenize_code) = tokenize(&param_name, param_type.clone());
params.push_str(&format!("{}{}: {}, ",
if needs_mut { "mut " } else { "" }, param_name, rust_type));
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
}
to_tokens.push_str(" tokens }");
Ok((params, to_tokens))
}
// generate code for outputs of the function and detokenizing them.
//
// two pieces of code are generated: the first gives an output type for the function signature
// as a tuple, and the second gives code to get that tuple from a deque of tokens.
//
// produce output type of the form (type_1, type_2, ...) without trailing comma.
// produce code for getting this output type from `outputs: VecDeque<Token>`, where
// an `Err(String)` can be returned.
//
// returns any unsupported param type encountered.
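// for instance, `[ParamType::Address, ParamType::Bool]` yields the output type
// "(util::Address, bool)" and decoding code which pops and detokenizes one
// token per output, returning the "Wrong output type" error if any is missing.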
fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut output_type = "(".to_string();
let mut decode_outputs = "Ok((".to_string();
for (index, output) in outputs.iter().cloned().enumerate() {
let rust_type = rust_type(output.clone())?;
output_type.push_str(&rust_type);
decode_outputs.push_str(&format!(
r#"
outputs
.pop_front()
.and_then(|output| {{ {} }})
.ok_or_else(|| "Wrong output type".to_string())?
"#,
detokenize("output", output)
));
// don't append trailing commas for the last element
// so we can reuse the same code for single-output contracts,
// since T == (T) != (T,)
if index < outputs.len() - 1 {
output_type.push_str(", ");
decode_outputs.push_str(", ");
}
}
output_type.push_str(")");
decode_outputs.push_str("))");
Ok((output_type, decode_outputs))
}
// create code for an argument type from param type.
fn rust_type(input: ParamType) -> Result<String, ParamType> {
Ok(match input {
ParamType::Address => "util::Address".into(),
ParamType::FixedBytes(len) if len <= 32 => format!("util::H{}", len * 8),
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
ParamType::Int(width) => match width {
8 | 16 | 32 | 64 => format!("i{}", width),
_ => return Err(ParamType::Int(width)),
},
ParamType::Uint(width) => match width {
8 | 16 | 32 | 64 => format!("u{}", width),
128 | 160 | 256 => format!("util::U{}", width),
_ => return Err(ParamType::Uint(width)),
},
ParamType::Bool => "bool".into(),
ParamType::String => "String".into(),
ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?),
other => return Err(other),
})
}
// create code for tokenizing this parameter.
// returns (needs_mut, code), where needs_mut indicates mutability required.
// panics on unsupported types.
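// e.g. tokenize("x", ParamType::Bool) yields (false, "Token::Bool(x)")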
fn tokenize(name: &str, input: ParamType) -> (bool, String) {
let mut needs_mut = false;
let code = match input {
ParamType::Address => format!("Token::Address({}.0)", name),
ParamType::Bytes => format!("Token::Bytes({})", name),
ParamType::FixedBytes(len) if len <= 32 =>
format!("Token::FixedBytes({}.0.to_vec())", name),
ParamType::FixedBytes(len) => {
needs_mut = true;
format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name)
}
ParamType::Int(width) => match width {
8 => format!("let mut r = if {} < 0 {{ [0xff; 32] }} else {{ [0; 32] }}; r[31] = {} as u8; Token::Int(r)", name, name),
16 | 32 | 64 =>
format!("let mut r = if {} < 0 {{ [0xff; 32] }} else {{ [0; 32] }}; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r)",
name, width, 32 - (width / 8), name),
_ => panic!("Signed int with more than 64 bits not supported."),
},
ParamType::Uint(width) => format!(
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
if width <= 64 { format!("util::U256::from({} as u64)", name) }
else { format!("util::U256::from({})", name) }
),
ParamType::Bool => format!("Token::Bool({})", name),
ParamType::String => format!("Token::String({})", name),
ParamType::Array(kind) => {
let (needs_mut, code) = tokenize("i", *kind);
format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())",
name, if needs_mut { "mut " } else { "" }, code)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."),
};
(needs_mut, code)
}
// create code for detokenizing this parameter.
// takes an output type and the identifier of a token.
// expands to code that evaluates to an Option<concrete type>.
// panics on unsupported types.
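// e.g. detokenize("output", ParamType::Bool) yields "output.to_bool()"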
fn detokenize(name: &str, output_type: ParamType) -> String {
match output_type {
ParamType::Address => format!("{}.to_address().map(util::H160)", name),
ParamType::Bytes => format!("{}.to_bytes()", name),
ParamType::FixedBytes(len) if len <= 32 => {
// ensure no panic on slice too small.
let read_hash = format!("b.resize({}, 0); util::H{}::from_slice(&b[..{}])",
len, len * 8, len);
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
name, read_hash)
}
ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name),
ParamType::Int(width) => {
let read_int = match width {
8 => "i[31] as i8".into(),
16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)),
_ => panic!("Signed integers over 64 bytes not allowed."),
};
format!("{}.to_int().map(|i| {})", name, read_int)
}
ParamType::Uint(width) => {
let read_uint = match width {
8 | 16 | 32 | 64 => format!("util::U256::from(&u[..]).low_u64() as u{}", width),
_ => format!("util::U{}::from(&u[..])", width),
};
format!("{}.to_uint().map(|u| {})", name, read_uint)
}
ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {})",
name, read_array)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")
}
}
#[cfg(test)]
mod tests {
use ethabi::spec::ParamType;
#[test]
fn input_types() {
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: util::Address, ");
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
"param_0: util::Address, param_1: Vec<u8>, ");
}
#[test]
fn output_types() {
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(util::Address)");
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
"(util::Address, Vec<Vec<u8>>)");
}
#[test]
fn rust_type() {
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "util::H256");
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
"Vec<util::H256>");
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
assert!(::rust_type(ParamType::Uint(63)).is_err());
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "util::U256");
}
// codegen tests will need bootstrapping of some kind.
}

View File

@ -0,0 +1,30 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Native contracts useful for Parity. These are type-safe wrappers
//! autogenerated at compile-time from Ethereum ABIs, and can be instantiated
//! given any closure which can dispatch calls to them asynchronously.
extern crate futures;
extern crate byteorder;
extern crate ethabi;
extern crate ethcore_util as util;
mod registry;
mod service_transaction;
pub use self::registry::Registry;
pub use self::service_transaction::ServiceTransactionChecker;
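A minimal sketch of the dispatch pattern described above, mirroring how client.rs wraps `call_contract` in this commit; the `eth_call` helper and the surrounding function are illustrative assumptions, not part of the diff:

extern crate ethcore_util as util;
extern crate futures;
extern crate native_contracts;

use futures::{future, Future};
use native_contracts::Registry;
use util::{Address, H256};

// `eth_call` is a hypothetical stand-in for whatever executes a read-only call
// against current state and returns the raw output bytes.
fn lookup_entry<F>(eth_call: F, registrar: Address, name_hash: H256) -> Result<Address, String>
    where F: Fn(Address, Vec<u8>) -> Result<Vec<u8>, String>
{
    let registry = Registry::new(registrar);
    // the generated method accepts any closure returning a future of the call output;
    // here the synchronous result is wrapped in an already-completed future.
    registry.get_address(move |addr, data| future::done(eth_call(addr, data)), name_hash, "A".to_string())
        .wait()
}

The same shape appears in `registry_address` and in the service-transaction checker further down, where `future::done` wraps the synchronous `call_contract` result and `.wait()` resolves it in place.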

View File

@ -0,0 +1,22 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports)]
//! Registrar contract: maps names to addresses and data.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/registry.rs"));
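The file included here is emitted into `OUT_DIR` at build time; a plausible build-script sketch (the generator crate name, ABI path, and error messages are assumptions, since the actual build.rs is not shown in this commit):

// build.rs (sketch)
extern crate native_contract_generator; // assumed name for the generator crate above

use std::env;
use std::fs::File;
use std::io::Write;

fn main() {
    // assumed location of the registrar ABI JSON within the crate
    let abi = include_str!("res/registrar.json");
    let code = native_contract_generator::generate_module("Registry", abi)
        .expect("registrar ABI is valid; qed");
    let out_dir = env::var("OUT_DIR").expect("OUT_DIR is set by cargo");
    let mut file = File::create(format!("{}/registry.rs", out_dir))
        .expect("can create file in OUT_DIR");
    file.write_all(code.as_bytes()).expect("can write generated code");
}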

View File

@ -0,0 +1,22 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports)]
//! Service transaction contract.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/service_transaction.rs"));

View File

@ -15,11 +15,11 @@
"difficultyHardforkTransition": "0x59d9", "difficultyHardforkTransition": "0x59d9",
"difficultyHardforkBoundDivisor": "0x0200", "difficultyHardforkBoundDivisor": "0x0200",
"bombDefuseTransition": "0x30d40", "bombDefuseTransition": "0x30d40",
"eip150Transition": "0x7fffffffffffffff", "eip150Transition": "0x927C0",
"eip155Transition": "0x7fffffffffffffff", "eip155Transition": "0x927C0",
"eip160Transition": "0x7fffffffffffffff", "eip160Transition": "0x927C0",
"eip161abcTransition": "0x7fffffffffffffff", "eip161abcTransition": "0x927C0",
"eip161dTransition": "0x7fffffffffffffff" "eip161dTransition": "0x927C0"
} }
} }
}, },
@ -28,6 +28,7 @@
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID": "0x1", "networkID": "0x1",
"chainID": "0x2",
"subprotocolName": "exp", "subprotocolName": "exp",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff"
}, },

View File

@ -30,47 +30,48 @@ use util::trie::TrieSpec;
use util::kvdb::*; use util::kvdb::*;
// other // other
use io::*;
use views::BlockView;
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
use header::BlockNumber;
use state::{self, State, CleanupMode};
use spec::Spec;
use basic_types::Seal; use basic_types::Seal;
use engines::Engine;
use service::ClientIoMessage;
use env_info::LastHashes;
use verification;
use verification::{PreverifiedBlock, Verifier};
use block::*; use block::*;
use transaction::{LocalizedTransaction, UnverifiedTransaction, SignedTransaction, Transaction, PendingTransaction, Action};
use blockchain::extras::TransactionAddress;
use types::filter::Filter;
use types::mode::Mode as IpcMode;
use log_entry::LocalizedLogEntry;
use verification::queue::BlockQueue;
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use blockchain::extras::TransactionAddress;
use client::Error as ClientError;
use client::{ use client::{
BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient,
MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode, MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
ChainNotify, PruningInfo, ChainNotify, PruningInfo,
}; };
use client::Error as ClientError;
use env_info::EnvInfo;
use executive::{Executive, Executed, TransactOptions, contract_address};
use receipt::{Receipt, LocalizedReceipt};
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
use trace;
use trace::FlatTransactionTraces;
use evm::{Factory as EvmFactory, Schedule};
use miner::{Miner, MinerService, TransactionImportResult};
use snapshot::{self, io as snapshot_io};
use factory::Factories;
use rlp::UntrustedRlp;
use state_db::StateDB;
use rand::OsRng;
use client::registry::Registry;
use encoded; use encoded;
use engines::Engine;
use env_info::EnvInfo;
use env_info::LastHashes;
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
use evm::{Factory as EvmFactory, Schedule};
use executive::{Executive, Executed, TransactOptions, contract_address};
use factory::Factories;
use futures::{future, Future};
use header::BlockNumber;
use io::*;
use log_entry::LocalizedLogEntry;
use miner::{Miner, MinerService, TransactionImportResult};
use native_contracts::Registry;
use rand::OsRng;
use receipt::{Receipt, LocalizedReceipt};
use rlp::UntrustedRlp;
use service::ClientIoMessage;
use snapshot::{self, io as snapshot_io};
use spec::Spec;
use state_db::StateDB;
use state::{self, State, CleanupMode};
use trace;
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
use trace::FlatTransactionTraces;
use transaction::{LocalizedTransaction, UnverifiedTransaction, SignedTransaction, Transaction, PendingTransaction, Action};
use types::filter::Filter;
use types::mode::Mode as IpcMode;
use verification;
use verification::{PreverifiedBlock, Verifier};
use verification::queue::BlockQueue;
use views::BlockView;
// re-export // re-export
pub use types::blockchain_info::BlockChainInfo; pub use types::blockchain_info::BlockChainInfo;
@ -254,8 +255,7 @@ impl Client {
if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) { if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) {
trace!(target: "client", "Found registrar at {}", reg_addr); trace!(target: "client", "Found registrar at {}", reg_addr);
let weak = Arc::downgrade(&client); let registrar = Registry::new(reg_addr);
let registrar = Registry::new(reg_addr, move |a, d| weak.upgrade().ok_or("No client!".into()).and_then(|c| c.call_contract(BlockId::Latest, a, d)));
*client.registrar.lock() = Some(registrar); *client.registrar.lock() = Some(registrar);
} }
Ok(client) Ok(client)
@ -1488,12 +1488,17 @@ impl BlockChainClient for Client {
} }
fn registrar_address(&self) -> Option<Address> { fn registrar_address(&self) -> Option<Address> {
self.registrar.lock().as_ref().map(|r| r.address.clone()) self.registrar.lock().as_ref().map(|r| r.address)
} }
fn registry_address(&self, name: String) -> Option<Address> { fn registry_address(&self, name: String) -> Option<Address> {
self.registrar.lock().as_ref() self.registrar.lock().as_ref()
.and_then(|r| r.get_address(&(name.as_bytes().sha3()), "A").ok()) .and_then(|r| {
let dispatch = move |reg_addr, data| {
future::done(self.call_contract(BlockId::Latest, reg_addr, data))
};
r.get_address(dispatch, name.as_bytes().sha3(), "A".to_string()).wait().ok()
})
.and_then(|a| if a.is_zero() { None } else { Some(a) }) .and_then(|a| if a.is_zero() { None } else { Some(a) })
} }
} }

View File

@ -16,7 +16,6 @@
//! Blockchain database client. //! Blockchain database client.
mod registry;
mod config; mod config;
mod error; mod error;
mod test_client; mod test_client;

View File

@ -1,338 +0,0 @@
// Autogenerated from JSON contract definition using Rust contract convertor.
// Command line: --name=Registry --jsonabi=/Users/gav/registry.abi
#![allow(unused_imports)]
use std::string::String;
use std::result::Result;
use std::fmt;
use {util, ethabi};
use util::{Uint};
pub struct Registry {
contract: ethabi::Contract,
pub address: util::Address,
do_call: Box<Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send + Sync + 'static>,
}
impl Registry {
pub fn new<F>(address: util::Address, do_call: F) -> Self
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send + Sync + 'static {
Registry {
contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":true,\"inputs\":[{\"name\":\"_data\",\"type\":\"address\"}],\"name\":\"canReverse\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"},{\"name\":\"_value\",\"type\":\"bytes32\"}],\"name\":\"setData\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"string\"}],\"name\":\"confirmReverse\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"reserve\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":true,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"drop\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"}],\"name\":\"getAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"setFee\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_to\",\"type\":\"address\"}],\"name\":\"transfer\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"}],\"name\":\"getData\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"reserved\",\"outputs\":[{\"name\":\"reserved\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[],\"name\":\"drain\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"string\"},{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"proposeReverse\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"hasReverse\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"}],\"name\":\"getUint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"fee\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"getOwner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payab
le\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"}],\"name\":\"getReverse\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_data\",\"type\":\"address\"}],\"name\":\"reverse\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"setUint\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"string\"},{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"confirmReverseAs\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[],\"name\":\"removeReverse\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_name\",\"type\":\"bytes32\"},{\"name\":\"_key\",\"type\":\"string\"},{\"name\":\"_value\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")),
address: address,
do_call: Box::new(do_call),
}
}
fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn can_reverse(&self, _data: &util::Address) -> Result<bool, String>
{
let call = self.contract.function("canReverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_data.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_owner(&self, _new: &util::Address) -> Result<(), String>
{
let call = self.contract.function("setOwner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_new.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_data(&self, _name: &util::H256, _key: &str, _value: &util::H256) -> Result<bool, String>
{
let call = self.contract.function("setData".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned()), ethabi::Token::FixedBytes(_value.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn confirm_reverse(&self, _name: &str) -> Result<bool, String>
{
let call = self.contract.function("confirmReverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::String(_name.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"}`
#[allow(dead_code)]
pub fn reserve(&self, _name: &util::H256) -> Result<bool, String>
{
let call = self.contract.function("reserve".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn drop(&self, _name: &util::H256) -> Result<bool, String>
{
let call = self.contract.function("drop".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_address(&self, _name: &util::H256, _key: &str) -> Result<util::Address, String>
{
let call = self.contract.function("getAddress".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_fee(&self, _amount: util::U256) -> Result<bool, String>
{
let call = self.contract.function("setFee".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Uint({ let mut r = [0u8; 32]; _amount.to_big_endian(&mut r); r })]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn transfer(&self, _name: &util::H256, _to: &util::Address) -> Result<bool, String>
{
let call = self.contract.function("transfer".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::Address(_to.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn owner(&self) -> Result<util::Address, String>
{
let call = self.contract.function("owner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_data(&self, _name: &util::H256, _key: &str) -> Result<util::H256, String>
{
let call = self.contract.function("getData".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn reserved(&self, _name: &util::H256) -> Result<bool, String>
{
let call = self.contract.function("reserved".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn drain(&self) -> Result<bool, String>
{
let call = self.contract.function("drain".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn propose_reverse(&self, _name: &str, _who: &util::Address) -> Result<bool, String>
{
let call = self.contract.function("proposeReverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::String(_name.to_owned()), ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn has_reverse(&self, _name: &util::H256) -> Result<bool, String>
{
let call = self.contract.function("hasReverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_uint(&self, _name: &util::H256, _key: &str) -> Result<util::U256, String>
{
let call = self.contract.function("getUint".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn fee(&self) -> Result<util::U256, String>
{
let call = self.contract.function("fee".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_owner(&self, _name: &util::H256) -> Result<util::Address, String>
{
let call = self.contract.function("getOwner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_reverse(&self, _name: &util::H256) -> Result<util::Address, String>
{
let call = self.contract.function("getReverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn reverse(&self, _data: &util::Address) -> Result<String, String>
{
let call = self.contract.function("reverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_data.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_string().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_uint(&self, _name: &util::H256, _key: &str, _value: util::U256) -> Result<bool, String>
{
let call = self.contract.function("setUint".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned()), ethabi::Token::Uint({ let mut r = [0u8; 32]; _value.to_big_endian(&mut r); r })]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn confirm_reverse_as(&self, _name: &str, _who: &util::Address) -> Result<bool, String>
{
let call = self.contract.function("confirmReverseAs".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::String(_name.to_owned()), ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn remove_reverse(&self) -> Result<(), String>
{
let call = self.contract.function("removeReverse".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_address(&self, _name: &util::H256, _key: &str, _value: &util::Address) -> Result<bool, String>
{
let call = self.contract.function("setAddress".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::FixedBytes(_name.as_ref().to_owned()), ethabi::Token::String(_key.to_owned()), ethabi::Token::Address(_value.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
}

View File

@ -79,37 +79,39 @@
//! cargo build --release //! cargo build --release
//! ``` //! ```
extern crate ethcore_io as io;
extern crate rustc_serialize;
extern crate crypto;
extern crate time;
extern crate env_logger;
extern crate num_cpus;
extern crate crossbeam;
extern crate ethjson;
extern crate bloomchain;
extern crate hyper;
extern crate ethash;
extern crate ethkey;
extern crate semver;
extern crate ethcore_ipc_nano as nanoipc;
extern crate ethcore_devtools as devtools;
extern crate rand;
extern crate bit_set; extern crate bit_set;
extern crate rlp; extern crate bloomchain;
extern crate ethcore_bloom_journal as bloom_journal; extern crate bn;
extern crate byteorder; extern crate byteorder;
extern crate transient_hashmap; extern crate crossbeam;
extern crate crypto;
extern crate env_logger;
extern crate ethabi;
extern crate ethash;
extern crate ethcore_bloom_journal as bloom_journal;
extern crate ethcore_devtools as devtools;
extern crate ethcore_io as io;
extern crate ethcore_ipc_nano as nanoipc;
extern crate ethcore_logger;
extern crate ethcore_stratum;
extern crate ethjson;
extern crate ethkey;
extern crate futures;
extern crate hardware_wallet;
extern crate hyper;
extern crate itertools;
extern crate linked_hash_map; extern crate linked_hash_map;
extern crate lru_cache; extern crate lru_cache;
extern crate ethcore_stratum; extern crate native_contracts;
extern crate ethabi; extern crate num_cpus;
extern crate hardware_wallet;
extern crate stats;
extern crate ethcore_logger;
extern crate num; extern crate num;
extern crate bn; extern crate rand;
extern crate itertools; extern crate rlp;
extern crate rustc_serialize;
extern crate semver;
extern crate stats;
extern crate time;
extern crate transient_hashmap;
#[macro_use] #[macro_use]
extern crate log; extern crate log;

View File

@ -14,9 +14,12 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use types::ids::BlockId;
use client::MiningBlockChainClient; use client::MiningBlockChainClient;
use transaction::SignedTransaction; use transaction::SignedTransaction;
use types::ids::BlockId;
use futures::{future, Future};
use native_contracts::ServiceTransactionChecker as Contract;
use util::{U256, Uint, Mutex}; use util::{U256, Uint, Mutex};
const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker"; const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker";
@ -24,7 +27,7 @@ const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transa
/// Service transactions checker. /// Service transactions checker.
#[derive(Default)] #[derive(Default)]
pub struct ServiceTransactionChecker { pub struct ServiceTransactionChecker {
contract: Mutex<Option<provider::Contract>>, contract: Mutex<Option<Contract>>,
} }
impl ServiceTransactionChecker { impl ServiceTransactionChecker {
@ -36,7 +39,7 @@ impl ServiceTransactionChecker {
.and_then(|contract_addr| { .and_then(|contract_addr| {
trace!(target: "txqueue", "Configuring for service transaction checker contract from {}", contract_addr); trace!(target: "txqueue", "Configuring for service transaction checker contract from {}", contract_addr);
Some(provider::Contract::new(contract_addr)) Some(Contract::new(contract_addr))
}) })
} }
} }
@ -46,168 +49,12 @@ impl ServiceTransactionChecker {
debug_assert_eq!(tx.gas_price, U256::zero()); debug_assert_eq!(tx.gas_price, U256::zero());
if let Some(ref contract) = *self.contract.lock() { if let Some(ref contract) = *self.contract.lock() {
let do_call = |a, d| client.call_contract(BlockId::Latest, a, d); contract.certified(
contract.certified(&do_call, &tx.sender()) |addr, data| future::done(client.call_contract(BlockId::Latest, addr, data)),
tx.sender()
).wait()
} else { } else {
Err("contract is not configured".to_owned()) Err("contract is not configured".to_owned())
} }
} }
} }
mod provider {
// Autogenerated from JSON contract definition using Rust contract convertor.
// Command line: --jsonabi=SimpleCertifier.abi --explicit-do-call
#![allow(unused_imports)]
use std::string::String;
use std::result::Result;
use std::fmt;
use {util, ethabi};
use util::{Uint};
pub struct Contract {
contract: ethabi::Contract,
address: util::Address,
}
impl Contract {
pub fn new(address: util::Address) -> Self
{
Contract {
contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setOwner\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certify\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"revoke\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"delegate\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"getUint\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_new\",\"type\":\"address\"}],\"name\":\"setDelegate\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"}],\"name\":\"certified\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_who\",\"type\":\"address\"},{\"name\":\"_field\",\"type\":\"string\"}],\"name\":\"get\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")),
address: address,
}
}
fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_owner<F>(&self, do_call: &F, _new: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("setOwner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_new.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn certify<F>(&self, do_call: &F, _who: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("certify".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_address<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::Address, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("getAddress".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn revoke<F>(&self, do_call: &F, _who: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("revoke".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn owner<F>(&self, do_call: &F) -> Result<util::Address, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("owner".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn delegate<F>(&self, do_call: &F) -> Result<util::Address, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("delegate".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_address().ok_or("Invalid type returned")?; util::Address::from(r) }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get_uint<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::U256, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("getUint".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_uint().ok_or("Invalid type returned")?; util::U256::from(r.as_ref()) }))
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn set_delegate<F>(&self, do_call: &F, _new: &util::Address) -> Result<(), String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("setDelegate".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_new.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn certified<F>(&self, do_call: &F, _who: &util::Address) -> Result<bool, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("certified".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0)]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_bool().ok_or("Invalid type returned")?; r }))
}
/// Auto-generated from: `{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn get<F>(&self, do_call: &F, _who: &util::Address, _field: &str) -> Result<util::H256, String>
where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send {
let call = self.contract.function("get".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(_who.clone().0), ethabi::Token::String(_field.to_owned())]
).map_err(Self::as_string)?;
let output = call.decode_output((do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
let mut result = output.into_iter().rev().collect::<Vec<_>>();
Ok(({ let r = result.pop().ok_or("Invalid return arity")?; let r = r.to_fixed_bytes().ok_or("Invalid type returned")?; util::H256::from_slice(r.as_ref()) }))
}
}
}

View File

@ -78,6 +78,12 @@ impl fmt::Display for Error {
} }
} }
impl Into<String> for Error {
fn into(self) -> String {
format!("{}", self)
}
}
impl From<SecpError> for Error { impl From<SecpError> for Error {
fn from(e: SecpError) -> Self { fn from(e: SecpError) -> Self {
Error::Secp(e) Error::Secp(e)

View File

@ -27,6 +27,7 @@ pub fn public_to_address(public: &Public) -> Address {
result result
} }
#[derive(Clone)]
/// secp256k1 key pair /// secp256k1 key pair
pub struct KeyPair { pub struct KeyPair {
secret: Secret, secret: Secret,

View File

@ -92,12 +92,13 @@ pub enum URLHintResult {
} }
/// URLHint Contract interface /// URLHint Contract interface
pub trait URLHint { pub trait URLHint: Send + Sync {
/// Resolves given id to registrar entry. /// Resolves given id to registrar entry.
fn resolve(&self, id: Bytes) -> Option<URLHintResult>; fn resolve(&self, id: Bytes) -> Option<URLHintResult>;
} }
/// `URLHintContract` API /// `URLHintContract` API
#[derive(Clone)]
pub struct URLHintContract { pub struct URLHintContract {
urlhint: Contract, urlhint: Contract,
registrar: Contract, registrar: Contract,
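Adding Send + Sync to the trait (and Clone to the concrete contract) is what allows a resolver to be shared across threads, which the single multi-threaded RPC server introduced in this change relies on. A sketch of the kind of sharing this bound enables; Bytes and URLHint are the items from the module above, the helper itself is hypothetical:

    use std::sync::Arc;
    use std::thread;

    // `resolver` can be any URLHint implementation, e.g. a URLHintContract.
    fn resolve_in_background(resolver: Arc<URLHint>, id: Bytes) {
        thread::spawn(move || {
            // Moving the Arc into another thread is only legal because of Send + Sync.
            let _entry = resolver.resolve(id);
        });
    }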

View File

@ -32,12 +32,13 @@ use std::sync::Arc;
use std::net::{SocketAddr, IpAddr}; use std::net::{SocketAddr, IpAddr};
use error::ServerError; use error::ServerError;
use route::Out; use route::Out;
use http::hyper::server::{Listening, Handler, Request, Response}; use http::hyper::server::{Handler, Request, Response};
use http::hyper::net::HttpStream; use http::hyper::net::HttpStream;
use http::hyper::header::{self, Vary, ContentLength, ContentType}; use http::hyper::header::{self, Vary, ContentLength, ContentType};
use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode};
use ethcore::client::BlockChainClient; use ethcore::client::BlockChainClient;
pub use http::hyper::server::Listening;
pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; pub use http::{AccessControlAllowOrigin, Host, DomainsValidation};
/// Request/response handler /// Request/response handler

View File

@ -1,2 +1 @@
// test script 9 // test script 10
// trigger rebuild on master 15 Mar 2017, 11:19

View File

@ -167,6 +167,8 @@ usage! {
or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")),
flag_jsonrpc_hosts: String = "none", flag_jsonrpc_hosts: String = "none",
or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")),
flag_jsonrpc_threads: Option<usize> = None,
or |c: &Config| otry!(c.rpc).threads.map(Some),
// IPC // IPC
flag_no_ipc: bool = false, flag_no_ipc: bool = false,
@ -179,21 +181,8 @@ usage! {
// DAPPS // DAPPS
flag_no_dapps: bool = false, flag_no_dapps: bool = false,
or |c: &Config| otry!(c.dapps).disable.clone(), or |c: &Config| otry!(c.dapps).disable.clone(),
flag_dapps_port: u16 = 8080u16,
or |c: &Config| otry!(c.dapps).port.clone(),
flag_dapps_interface: String = "local",
or |c: &Config| otry!(c.dapps).interface.clone(),
flag_dapps_hosts: String = "none",
or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| vec.join(",")),
flag_dapps_cors: Option<String> = None,
or |c: &Config| otry!(c.dapps).cors.clone().map(Some),
flag_dapps_path: String = "$BASE/dapps", flag_dapps_path: String = "$BASE/dapps",
or |c: &Config| otry!(c.dapps).path.clone(), or |c: &Config| otry!(c.dapps).path.clone(),
flag_dapps_user: Option<String> = None,
or |c: &Config| otry!(c.dapps).user.clone().map(Some),
flag_dapps_pass: Option<String> = None,
or |c: &Config| otry!(c.dapps).pass.clone().map(Some),
flag_dapps_apis_all: bool = false, or |_| None,
// Secret Store // Secret Store
flag_no_secretstore: bool = false, flag_no_secretstore: bool = false,
@ -333,6 +322,22 @@ usage! {
or |c: &Config| otry!(c.misc).log_file.clone().map(Some), or |c: &Config| otry!(c.misc).log_file.clone().map(Some),
flag_no_color: bool = false, flag_no_color: bool = false,
or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(),
// -- Legacy Options supported in configs
flag_dapps_port: Option<u16> = None,
or |c: &Config| otry!(c.dapps).port.clone().map(Some),
flag_dapps_interface: Option<String> = None,
or |c: &Config| otry!(c.dapps).interface.clone().map(Some),
flag_dapps_hosts: Option<String> = None,
or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| Some(vec.join(","))),
flag_dapps_cors: Option<String> = None,
or |c: &Config| otry!(c.dapps).cors.clone().map(Some),
flag_dapps_user: Option<String> = None,
or |c: &Config| otry!(c.dapps).user.clone().map(Some),
flag_dapps_pass: Option<String> = None,
or |c: &Config| otry!(c.dapps).pass.clone().map(Some),
flag_dapps_apis_all: Option<bool> = None, or |_| None,
} }
{ {
// Values with optional default value. // Values with optional default value.
@ -424,6 +429,7 @@ struct Rpc {
cors: Option<String>, cors: Option<String>,
apis: Option<Vec<String>>, apis: Option<Vec<String>>,
hosts: Option<Vec<String>>, hosts: Option<Vec<String>>,
threads: Option<usize>,
} }
#[derive(Default, Debug, PartialEq, RustcDecodable)] #[derive(Default, Debug, PartialEq, RustcDecodable)]
@ -679,6 +685,7 @@ mod tests {
flag_jsonrpc_cors: Some("null".into()), flag_jsonrpc_cors: Some("null".into()),
flag_jsonrpc_apis: "web3,eth,net,parity,traces,rpc".into(), flag_jsonrpc_apis: "web3,eth,net,parity,traces,rpc".into(),
flag_jsonrpc_hosts: "none".into(), flag_jsonrpc_hosts: "none".into(),
flag_jsonrpc_threads: None,
// IPC // IPC
flag_no_ipc: false, flag_no_ipc: false,
@ -686,15 +693,8 @@ mod tests {
flag_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc".into(), flag_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc".into(),
// DAPPS // DAPPS
flag_no_dapps: false,
flag_dapps_port: 8080u16,
flag_dapps_interface: "local".into(),
flag_dapps_hosts: "none".into(),
flag_dapps_cors: None,
flag_dapps_path: "$HOME/.parity/dapps".into(), flag_dapps_path: "$HOME/.parity/dapps".into(),
flag_dapps_user: Some("test_user".into()), flag_no_dapps: false,
flag_dapps_pass: Some("test_pass".into()),
flag_dapps_apis_all: false,
flag_no_secretstore: false, flag_no_secretstore: false,
flag_secretstore_port: 8082u16, flag_secretstore_port: 8082u16,
@ -799,6 +799,14 @@ mod tests {
flag_extradata: None, flag_extradata: None,
flag_cache: None, flag_cache: None,
flag_warp: Some(true), flag_warp: Some(true),
// Legacy-Dapps
flag_dapps_port: Some(8080),
flag_dapps_interface: Some("local".into()),
flag_dapps_hosts: Some("none".into()),
flag_dapps_cors: None,
flag_dapps_user: Some("test_user".into()),
flag_dapps_pass: Some("test_pass".into()),
flag_dapps_apis_all: None,
// -- Miscellaneous Options // -- Miscellaneous Options
flag_version: false, flag_version: false,
@ -882,6 +890,7 @@ mod tests {
cors: None, cors: None,
apis: None, apis: None,
hosts: None, hosts: None,
threads: None,
}), }),
ipc: Some(Ipc { ipc: Some(Ipc {
disable: None, disable: None,
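The `or |c: &Config| ...` closures above describe where a flag's value comes from when it is not given on the command line: the matching field of the parsed TOML config, if its section exists. A minimal illustration with a hypothetical helper, using the Rpc struct shown in this diff:

    // CLI value wins; otherwise fall back to the [rpc] section of the config file.
    fn effective_threads(cli: Option<usize>, config_rpc: Option<&Rpc>) -> Option<usize> {
        cli.or_else(|| config_rpc.and_then(|rpc| rpc.threads))
    }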

View File

@ -74,7 +74,7 @@ Operating Options:
synchronize a bare minimum of data and fetch necessary synchronize a bare minimum of data and fetch necessary
data on-demand from the network. Much lower in storage, data on-demand from the network. Much lower in storage,
potentially higher in bandwidth. Has no effect with potentially higher in bandwidth. Has no effect with
subcommands (default: {flag_light}) subcommands (default: {flag_light}).
Account Options: Account Options:
--unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
@ -155,6 +155,8 @@ API and Console Options:
is additional security against some attack is additional security against some attack
vectors. Special options: "all", "none", vectors. Special options: "all", "none",
(default: {flag_jsonrpc_hosts}). (default: {flag_jsonrpc_hosts}).
--jsonrpc-threads THREADS Enables an experimental, faster implementation of the JSON-RPC server.
Requires the Dapps server to be disabled using --no-dapps. (default: {flag_jsonrpc_threads:?})
--no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc}) --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc})
--ipc-path PATH Specify custom path for JSON-RPC over IPC service --ipc-path PATH Specify custom path for JSON-RPC over IPC service
@ -163,29 +165,8 @@ API and Console Options:
IPC (default: {flag_ipc_apis}). IPC (default: {flag_ipc_apis}).
--no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps}) --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps})
--dapps-port PORT Specify the port portion of the Dapps server
(default: {flag_dapps_port}).
--dapps-interface IP Specify the hostname portion of the Dapps
server, IP should be an interface's IP address,
or local (default: {flag_dapps_interface}).
--dapps-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it
is additional security against some attack
vectors. Special options: "all", "none",
(default: {flag_dapps_hosts}).
--dapps-cors URL Specify CORS headers for Dapps server APIs.
(default: {flag_dapps_cors:?})
--dapps-user USERNAME Specify username for Dapps server. It will be
used in HTTP Basic Authentication Scheme.
If --dapps-pass is not specified you will be
asked for password on startup. (default: {flag_dapps_user:?})
--dapps-pass PASSWORD Specify password for Dapps server. Use only in
conjunction with --dapps-user. (default: {flag_dapps_pass:?})
--dapps-path PATH Specify directory where dapps should be installed. --dapps-path PATH Specify directory where dapps should be installed.
(default: {flag_dapps_path}) (default: {flag_dapps_path})
--dapps-apis-all Expose all possible RPC APIs on Dapps port.
WARNING: INSECURE. Used only for development.
(default: {flag_dapps_apis_all})
--ipfs-api Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api}) --ipfs-api Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api})
--ipfs-api-port PORT Configure on which port the IPFS HTTP API should listen. --ipfs-api-port PORT Configure on which port the IPFS HTTP API should listen.
(default: {flag_ipfs_api_port}) (default: {flag_ipfs_api_port})
@ -398,6 +379,13 @@ Legacy Options:
--jsonrpc-off Equivalent to --no-jsonrpc. --jsonrpc-off Equivalent to --no-jsonrpc.
-w --webapp Does nothing; dapps server is on by default now. -w --webapp Does nothing; dapps server is on by default now.
--dapps-off Equivalent to --no-dapps. --dapps-off Equivalent to --no-dapps.
--dapps-user USERNAME Dapps server authentication has been removed. (default: {flag_dapps_user:?})
--dapps-pass PASSWORD Dapps server authentication has been removed. (default: {flag_dapps_pass:?})
--dapps-apis-all Dapps server is merged with RPC server. Use --jsonrpc-apis. (default: {flag_dapps_apis_all:?})
--dapps-cors URL Dapps server is merged with RPC server. Use --jsonrpc-cors. (default: {flag_dapps_cors:?})
--dapps-hosts HOSTS Dapps server is merged with RPC server. Use --jsonrpc-hosts. (default: {flag_dapps_hosts:?})
--dapps-interface IP Dapps server is merged with RPC server. Use --jsonrpc-interface. (default: {flag_dapps_interface:?})
--dapps-port PORT Dapps server is merged with RPC server. Use --jsonrpc-port. (default: {flag_dapps_port:?})
--rpc Does nothing; JSON-RPC is on by default now. --rpc Does nothing; JSON-RPC is on by default now.
--warp Does nothing; Warp sync is on by default. (default: {flag_warp}) --warp Does nothing; Warp sync is on by default. (default: {flag_warp})
--rpcaddr IP Equivalent to --jsonrpc-interface IP. --rpcaddr IP Equivalent to --jsonrpc-interface IP.
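Taken together, the experimental server is opted into by disabling the Dapps server and passing a thread count, for example (illustrative invocation):

    parity --no-dapps --jsonrpc-threads 4

The --dapps-port, --dapps-interface, --dapps-hosts, --dapps-cors, --dapps-user, --dapps-pass and --dapps-apis-all flags are still parsed so that existing configurations keep working, but each one now only triggers the corresponding warning listed under Legacy Options.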

View File

@ -132,12 +132,17 @@ impl Configuration {
let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive);
let geth_compatibility = self.args.flag_geth; let geth_compatibility = self.args.flag_geth;
let ui_address = self.ui_port().map(|port| (self.ui_interface(), port)); let ui_address = self.ui_port().map(|port| (self.ui_interface(), port));
let dapps_conf = self.dapps_config(); let mut dapps_conf = self.dapps_config();
let ipfs_conf = self.ipfs_config(); let ipfs_conf = self.ipfs_config();
let signer_conf = self.signer_config(); let signer_conf = self.signer_config();
let secretstore_conf = self.secretstore_config(); let secretstore_conf = self.secretstore_config();
let format = self.format()?; let format = self.format()?;
if self.args.flag_jsonrpc_threads.is_some() && dapps_conf.enabled {
dapps_conf.enabled = false;
writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr.")
}
let cmd = if self.args.flag_version { let cmd = if self.args.flag_version {
Cmd::Version Cmd::Version
} else if self.args.cmd_signer { } else if self.args.cmd_signer {
@ -556,19 +561,12 @@ impl Configuration {
fn dapps_config(&self) -> DappsConfiguration { fn dapps_config(&self) -> DappsConfiguration {
DappsConfiguration { DappsConfiguration {
enabled: self.dapps_enabled(), enabled: self.dapps_enabled(),
interface: self.dapps_interface(),
port: self.args.flag_dapps_port,
hosts: self.dapps_hosts(),
cors: self.dapps_cors(),
user: self.args.flag_dapps_user.clone(),
pass: self.args.flag_dapps_pass.clone(),
dapps_path: PathBuf::from(self.directories().dapps), dapps_path: PathBuf::from(self.directories().dapps),
extra_dapps: if self.args.cmd_dapp { extra_dapps: if self.args.cmd_dapp {
self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect() self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect()
} else { } else {
vec![] vec![]
}, },
all_apis: self.args.flag_dapps_apis_all,
} }
} }
@ -748,14 +746,10 @@ impl Configuration {
Self::cors(self.args.flag_ipfs_api_cors.as_ref()) Self::cors(self.args.flag_ipfs_api_cors.as_ref())
} }
fn dapps_cors(&self) -> Option<Vec<String>> {
Self::cors(self.args.flag_dapps_cors.as_ref())
}
fn hosts(hosts: &str) -> Option<Vec<String>> { fn hosts(hosts: &str) -> Option<Vec<String>> {
match hosts { match hosts {
"none" => return Some(Vec::new()), "none" => return Some(Vec::new()),
"all" => return None, "*" | "all" | "any" => return None,
_ => {} _ => {}
} }
let hosts = hosts.split(',').map(Into::into).collect(); let hosts = hosts.split(',').map(Into::into).collect();
@ -766,10 +760,6 @@ impl Configuration {
Self::hosts(&self.args.flag_jsonrpc_hosts) Self::hosts(&self.args.flag_jsonrpc_hosts)
} }
fn dapps_hosts(&self) -> Option<Vec<String>> {
Self::hosts(&self.args.flag_dapps_hosts)
}
fn ipfs_hosts(&self) -> Option<Vec<String>> { fn ipfs_hosts(&self) -> Option<Vec<String>> {
Self::hosts(&self.args.flag_ipfs_api_hosts) Self::hosts(&self.args.flag_ipfs_api_hosts)
} }
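The host whitelist parser now accepts "*" and "any" as synonyms for "all". A self-contained sketch of the behaviour encoded by the match above:

    // "none" -> empty whitelist, "*"/"all"/"any" -> no check, otherwise a comma list.
    fn parse_hosts(hosts: &str) -> Option<Vec<String>> {
        match hosts {
            "none" => Some(Vec::new()),
            "*" | "all" | "any" => None,
            list => Some(list.split(',').map(Into::into).collect()),
        }
    }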
@ -795,12 +785,17 @@ impl Configuration {
fn http_config(&self) -> Result<HttpConfiguration, String> { fn http_config(&self) -> Result<HttpConfiguration, String> {
let conf = HttpConfiguration { let conf = HttpConfiguration {
enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, enabled: self.rpc_enabled(),
interface: self.rpc_interface(), interface: self.rpc_interface(),
port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port),
apis: self.rpc_apis().parse()?, apis: self.rpc_apis().parse()?,
hosts: self.rpc_hosts(), hosts: self.rpc_hosts(),
cors: self.rpc_cors(), cors: self.rpc_cors(),
threads: match self.args.flag_jsonrpc_threads {
Some(threads) if threads > 0 => Some(threads),
None => None,
_ => return Err("--jsonrpc-threads number needs to be positive.".into()),
}
}; };
Ok(conf) Ok(conf)
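The thread count is validated when the HTTP configuration is assembled: zero is rejected instead of being treated as "no extra threads". The same check written as a hypothetical standalone helper:

    fn validated_threads(flag: Option<usize>) -> Result<Option<usize>, String> {
        match flag {
            Some(threads) if threads > 0 => Ok(Some(threads)),
            None => Ok(None),
            _ => Err("--jsonrpc-threads number needs to be positive.".into()),
        }
    }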
@ -811,7 +806,7 @@ impl Configuration {
name: self.args.flag_identity.clone(), name: self.args.flag_identity.clone(),
chain: self.chain(), chain: self.chain(),
network_port: self.args.flag_port, network_port: self.args.flag_port,
rpc_enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, rpc_enabled: self.rpc_enabled(),
rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()),
rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port),
} }
@ -918,13 +913,6 @@ impl Configuration {
Self::interface(&self.network_settings().rpc_interface) Self::interface(&self.network_settings().rpc_interface)
} }
fn dapps_interface(&self) -> String {
match self.args.flag_dapps_interface.as_str() {
"local" => "127.0.0.1",
x => x,
}.into()
}
fn ipfs_interface(&self) -> String { fn ipfs_interface(&self) -> String {
Self::interface(&self.args.flag_ipfs_api_interface) Self::interface(&self.args.flag_ipfs_api_interface)
} }
@ -940,8 +928,12 @@ impl Configuration {
Self::interface(&self.args.flag_stratum_interface) Self::interface(&self.args.flag_stratum_interface)
} }
fn rpc_enabled(&self) -> bool {
!self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc
}
fn dapps_enabled(&self) -> bool { fn dapps_enabled(&self) -> bool {
!self.args.flag_dapps_off && !self.args.flag_no_dapps && cfg!(feature = "dapps") !self.args.flag_dapps_off && !self.args.flag_no_dapps && self.rpc_enabled() && cfg!(feature = "dapps")
} }
fn secretstore_enabled(&self) -> bool { fn secretstore_enabled(&self) -> bool {
@ -1321,23 +1313,6 @@ mod tests {
assert_eq!(conf3.rpc_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); assert_eq!(conf3.rpc_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()]));
} }
#[test]
fn should_parse_dapps_hosts() {
// given
// when
let conf0 = parse(&["parity"]);
let conf1 = parse(&["parity", "--dapps-hosts", "none"]);
let conf2 = parse(&["parity", "--dapps-hosts", "all"]);
let conf3 = parse(&["parity", "--dapps-hosts", "ethcore.io,something.io"]);
// then
assert_eq!(conf0.dapps_hosts(), Some(Vec::new()));
assert_eq!(conf1.dapps_hosts(), Some(Vec::new()));
assert_eq!(conf2.dapps_hosts(), None);
assert_eq!(conf3.dapps_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()]));
}
#[test] #[test]
fn should_parse_ipfs_hosts() { fn should_parse_ipfs_hosts() {
// given // given

View File

@ -20,26 +20,18 @@ use std::sync::Arc;
use dir::default_data_path; use dir::default_data_path;
use ethcore::client::{Client, BlockChainClient, BlockId}; use ethcore::client::{Client, BlockChainClient, BlockId};
use ethcore::transaction::{Transaction, Action}; use ethcore::transaction::{Transaction, Action};
use ethcore_rpc::informant::RpcStats;
use hash_fetch::fetch::Client as FetchClient; use hash_fetch::fetch::Client as FetchClient;
use hash_fetch::urlhint::ContractClient; use hash_fetch::urlhint::ContractClient;
use helpers::replace_home; use helpers::replace_home;
use rpc_apis::{self, SignerService}; use rpc_apis::SignerService;
use parity_reactor; use parity_reactor;
use util::{Bytes, Address, U256}; use util::{Bytes, Address, U256};
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Configuration { pub struct Configuration {
pub enabled: bool, pub enabled: bool,
pub interface: String,
pub port: u16,
pub hosts: Option<Vec<String>>,
pub cors: Option<Vec<String>>,
pub user: Option<String>,
pub pass: Option<String>,
pub dapps_path: PathBuf, pub dapps_path: PathBuf,
pub extra_dapps: Vec<PathBuf>, pub extra_dapps: Vec<PathBuf>,
pub all_apis: bool,
} }
impl Default for Configuration { impl Default for Configuration {
@ -47,15 +39,8 @@ impl Default for Configuration {
let data_dir = default_data_path(); let data_dir = default_data_path();
Configuration { Configuration {
enabled: true, enabled: true,
interface: "127.0.0.1".into(),
port: 8080,
hosts: Some(Vec::new()),
cors: None,
user: None,
pass: None,
dapps_path: replace_home(&data_dir, "$BASE/dapps").into(), dapps_path: replace_home(&data_dir, "$BASE/dapps").into(),
extra_dapps: vec![], extra_dapps: vec![],
all_apis: false,
} }
} }
} }
@ -96,70 +81,52 @@ impl ContractClient for FullRegistrar {
// TODO: light client implementation forwarding to OnDemand and waiting for future // TODO: light client implementation forwarding to OnDemand and waiting for future
// to resolve. // to resolve.
pub struct Dependencies {
pub struct Dependencies<D: rpc_apis::Dependencies> { pub sync_status: Arc<::parity_dapps::SyncStatus>,
pub apis: Arc<D>,
pub sync_status: Arc<::ethcore_dapps::SyncStatus>,
pub contract_client: Arc<ContractClient>, pub contract_client: Arc<ContractClient>,
pub remote: parity_reactor::TokioRemote, pub remote: parity_reactor::TokioRemote,
pub fetch: FetchClient, pub fetch: FetchClient,
pub signer: Arc<SignerService>, pub signer: Arc<SignerService>,
pub stats: Arc<RpcStats>,
} }
pub fn new<D>(configuration: Configuration, deps: Dependencies<D>) -> Result<Option<WebappServer>, String> pub fn new(configuration: Configuration, deps: Dependencies)
where D: rpc_apis::Dependencies -> Result<Option<Middleware>, String>
{ {
if !configuration.enabled { if !configuration.enabled {
return Ok(None); return Ok(None);
} }
let url = format!("{}:{}", configuration.interface, configuration.port); dapps_middleware(
let addr = url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url))?;
let auth = configuration.user.as_ref().map(|username| {
let password = configuration.pass.as_ref().map_or_else(|| {
use rpassword::read_password;
println!("Type password for WebApps server (user: {}): ", username);
let pass = read_password().unwrap();
println!("OK, got it. Starting server...");
pass
}, |pass| pass.to_owned());
(username.to_owned(), password)
});
Ok(Some(setup_dapps_server(
deps, deps,
configuration.dapps_path, configuration.dapps_path,
configuration.extra_dapps, configuration.extra_dapps,
&addr, ).map(Some)
configuration.hosts,
configuration.cors,
auth,
configuration.all_apis,
)?))
} }
pub use self::server::WebappServer; pub use self::server::Middleware;
pub use self::server::setup_dapps_server; pub use self::server::dapps_middleware;
#[cfg(not(feature = "dapps"))] #[cfg(not(feature = "dapps"))]
mod server { mod server {
use super::Dependencies; use super::Dependencies;
use std::net::SocketAddr;
use std::path::PathBuf; use std::path::PathBuf;
use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction};
pub struct WebappServer; pub struct Middleware;
pub fn setup_dapps_server(
impl RequestMiddleware for Middleware {
fn on_request(
&self, req: &hyper::server::Request<hyper::net::HttpStream>, control: &hyper::Control
) -> RequestMiddlewareAction {
unreachable!()
}
}
pub fn dapps_middleware(
_deps: Dependencies, _deps: Dependencies,
_dapps_path: PathBuf, _dapps_path: PathBuf,
_extra_dapps: Vec<PathBuf>, _extra_dapps: Vec<PathBuf>,
_url: &SocketAddr, ) -> Result<Middleware, String> {
_allowed_hosts: Option<Vec<String>>,
_cors: Option<Vec<String>>,
_auth: Option<(String, String)>,
_all_apis: bool,
) -> Result<WebappServer, String> {
Err("Your Parity version has been compiled without WebApps support.".into()) Err("Your Parity version has been compiled without WebApps support.".into())
} }
} }
@ -169,70 +136,31 @@ mod server {
use super::Dependencies; use super::Dependencies;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use std::net::SocketAddr;
use std::io;
use ansi_term::Colour; use hash_fetch::fetch::Client as FetchClient;
use ethcore_dapps::{AccessControlAllowOrigin, Host}; use parity_dapps;
use parity_reactor; use parity_reactor;
use rpc_apis;
pub use ethcore_dapps::Server as WebappServer; pub type Middleware = parity_dapps::Middleware<FetchClient>;
pub fn setup_dapps_server<D: rpc_apis::Dependencies>( pub fn dapps_middleware(
deps: Dependencies<D>, deps: Dependencies,
dapps_path: PathBuf, dapps_path: PathBuf,
extra_dapps: Vec<PathBuf>, extra_dapps: Vec<PathBuf>,
url: &SocketAddr, ) -> Result<Middleware, String> {
allowed_hosts: Option<Vec<String>>,
cors: Option<Vec<String>>,
auth: Option<(String, String)>,
all_apis: bool,
) -> Result<WebappServer, String> {
use ethcore_dapps as dapps;
let server = dapps::ServerBuilder::new(
&dapps_path,
deps.contract_client,
parity_reactor::Remote::new(deps.remote.clone()),
);
let allowed_hosts: Option<Vec<_>> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect());
let cors: Option<Vec<_>> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect());
let signer = deps.signer.clone(); let signer = deps.signer.clone();
let server = server let parity_remote = parity_reactor::Remote::new(deps.remote.clone());
.fetch(deps.fetch.clone()) let web_proxy_tokens = Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token));
.sync_status(deps.sync_status)
.web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token)))
.extra_dapps(&extra_dapps)
.signer_address(deps.signer.address())
.allowed_hosts(allowed_hosts.into())
.extra_cors_headers(cors.into());
let api_set = if all_apis { Ok(parity_dapps::Middleware::new(
warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Dapps with all APIs exposed.")); parity_remote,
info!("If you do not intend this, exit now."); deps.signer.address(),
rpc_apis::ApiSet::SafeContext dapps_path,
} else { extra_dapps,
rpc_apis::ApiSet::UnsafeContext deps.contract_client,
}; deps.sync_status,
let apis = rpc_apis::setup_rpc(deps.stats, &*deps.apis, api_set); web_proxy_tokens,
let start_result = match auth { deps.fetch.clone(),
None => { ))
server.start_unsecured_http(url, apis, deps.remote)
},
Some((username, password)) => {
server.start_basic_auth_http(url, &username, &password, apis, deps.remote)
},
};
match start_result {
Err(dapps::ServerError::IoError(err)) => match err.kind() {
io::ErrorKind::AddrInUse => Err(format!("WebApps address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --dapps-port and --dapps-interface options.", url)),
_ => Err(format!("WebApps io error: {}", err)),
},
Err(e) => Err(format!("WebApps error: {:?}", e)),
Ok(server) => Ok(server),
}
} }
} }
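With both the real and the stub variant exposing dapps_middleware, the caller no longer binds a second HTTP port; it builds the middleware and hands it to the RPC server. A sketch of the wiring using the signatures shown above and in run.rs further down (variable names illustrative):

    // dapps::new yields Option<Middleware>: None when dapps are disabled.
    let dapps_middleware = dapps::new(dapps_conf, dapps_deps)?;
    // A single HTTP server now serves JSON-RPC and dapps on the same port.
    let http_server = rpc::new_http(http_conf, &rpc_dependencies, dapps_middleware)?;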

View File

@ -21,94 +21,89 @@ use cli::Args;
pub enum Deprecated { pub enum Deprecated {
DoesNothing(&'static str), DoesNothing(&'static str),
Replaced(&'static str, &'static str), Replaced(&'static str, &'static str),
Removed(&'static str),
} }
impl fmt::Display for Deprecated { impl fmt::Display for Deprecated {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self { match *self {
Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s), Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default.", s),
Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new), Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead.", old, new),
Deprecated::Removed(s) => write!(f, "Option '{}' has been removed and is no longer supported.", s)
} }
} }
} }
impl Deprecated {
fn jsonrpc() -> Self {
Deprecated::DoesNothing("--jsonrpc")
}
fn rpc() -> Self {
Deprecated::DoesNothing("--rpc")
}
fn jsonrpc_off() -> Self {
Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")
}
fn webapp() -> Self {
Deprecated::DoesNothing("--webapp")
}
fn dapps_off() -> Self {
Deprecated::Replaced("--dapps-off", "--no-dapps")
}
fn ipcdisable() -> Self {
Deprecated::Replaced("--ipcdisable", "--no-ipc")
}
fn ipc_off() -> Self {
Deprecated::Replaced("--ipc-off", "--no-ipc")
}
fn etherbase() -> Self {
Deprecated::Replaced("--etherbase", "--author")
}
fn extradata() -> Self {
Deprecated::Replaced("--extradata", "--extra-data")
}
}
pub fn find_deprecated(args: &Args) -> Vec<Deprecated> { pub fn find_deprecated(args: &Args) -> Vec<Deprecated> {
let mut result = vec![]; let mut result = vec![];
if args.flag_jsonrpc { if args.flag_jsonrpc {
result.push(Deprecated::jsonrpc()); result.push(Deprecated::DoesNothing("--jsonrpc"));
} }
if args.flag_rpc { if args.flag_rpc {
result.push(Deprecated::rpc()); result.push(Deprecated::DoesNothing("--rpc"));
} }
if args.flag_jsonrpc_off { if args.flag_jsonrpc_off {
result.push(Deprecated::jsonrpc_off()); result.push(Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"));
} }
if args.flag_webapp { if args.flag_webapp {
result.push(Deprecated::webapp()) result.push(Deprecated::DoesNothing("--webapp"));
} }
if args.flag_dapps_off { if args.flag_dapps_off {
result.push(Deprecated::dapps_off()); result.push(Deprecated::Replaced("--dapps-off", "--no-dapps"));
} }
if args.flag_ipcdisable { if args.flag_ipcdisable {
result.push(Deprecated::ipcdisable()); result.push(Deprecated::Replaced("--ipcdisable", "--no-ipc"));
} }
if args.flag_ipc_off { if args.flag_ipc_off {
result.push(Deprecated::ipc_off()); result.push(Deprecated::Replaced("--ipc-off", "--no-ipc"));
} }
if args.flag_etherbase.is_some() { if args.flag_etherbase.is_some() {
result.push(Deprecated::etherbase()); result.push(Deprecated::Replaced("--etherbase", "--author"));
} }
if args.flag_extradata.is_some() { if args.flag_extradata.is_some() {
result.push(Deprecated::extradata()); result.push(Deprecated::Replaced("--extradata", "--extra-data"));
} }
// Removed in 1.7
if args.flag_dapps_port.is_some() {
result.push(Deprecated::Replaced("--dapps-port", "--jsonrpc-port"));
}
if args.flag_dapps_interface.is_some() {
result.push(Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface"));
}
if args.flag_dapps_hosts.is_some() {
result.push(Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts"));
}
if args.flag_dapps_cors.is_some() {
result.push(Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors"));
}
if args.flag_dapps_user.is_some() {
result.push(Deprecated::Removed("--dapps-user"));
}
if args.flag_dapps_pass.is_some() {
result.push(Deprecated::Removed("--dapps-pass"));
}
if args.flag_dapps_apis_all.is_some() {
result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"));
}
// Removed in 1.8
result result
} }
@ -131,17 +126,31 @@ mod tests {
args.flag_ipc_off = true; args.flag_ipc_off = true;
args.flag_etherbase = Some(Default::default()); args.flag_etherbase = Some(Default::default());
args.flag_extradata = Some(Default::default()); args.flag_extradata = Some(Default::default());
args.flag_dapps_port = Some(Default::default());
args.flag_dapps_interface = Some(Default::default());
args.flag_dapps_hosts = Some(Default::default());
args.flag_dapps_cors = Some(Default::default());
args.flag_dapps_user = Some(Default::default());
args.flag_dapps_pass = Some(Default::default());
args.flag_dapps_apis_all = Some(Default::default());
args args
}), vec![ }), vec![
Deprecated::jsonrpc(), Deprecated::DoesNothing("--jsonrpc"),
Deprecated::rpc(), Deprecated::DoesNothing("--rpc"),
Deprecated::jsonrpc_off(), Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"),
Deprecated::webapp(), Deprecated::DoesNothing("--webapp"),
Deprecated::dapps_off(), Deprecated::Replaced("--dapps-off", "--no-dapps"),
Deprecated::ipcdisable(), Deprecated::Replaced("--ipcdisable", "--no-ipc"),
Deprecated::ipc_off(), Deprecated::Replaced("--ipc-off", "--no-ipc"),
Deprecated::etherbase(), Deprecated::Replaced("--etherbase", "--author"),
Deprecated::extradata(), Deprecated::Replaced("--extradata", "--extra-data"),
Deprecated::Replaced("--dapps-port", "--jsonrpc-port"),
Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface"),
Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts"),
Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors"),
Deprecated::Removed("--dapps-user"),
Deprecated::Removed("--dapps-pass"),
Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"),
]); ]);
} }
} }
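find_deprecated is a pure reporting pass: it inspects the parsed Args and returns the warnings to print, without touching any values. A minimal sketch of surfacing the new Removed notices alongside the others, assuming the usual log macros are in scope:

    fn warn_deprecated(args: &Args) {
        for notice in find_deprecated(args) {
            // Display is implemented above for DoesNothing, Replaced and Removed.
            warn!("{}", notice);
        }
    }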

View File

@ -15,10 +15,9 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc; use std::sync::Arc;
use parity_ipfs_api::{self, AccessControlAllowOrigin, Host}; use parity_ipfs_api::{self, AccessControlAllowOrigin, Host, Listening};
use parity_ipfs_api::error::ServerError; use parity_ipfs_api::error::ServerError;
use ethcore::client::BlockChainClient; use ethcore::client::BlockChainClient;
use hyper::server::Listening;
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Configuration { pub struct Configuration {

View File

@ -29,7 +29,6 @@ extern crate docopt;
extern crate env_logger; extern crate env_logger;
extern crate fdlimit; extern crate fdlimit;
extern crate futures; extern crate futures;
extern crate hyper;
extern crate isatty; extern crate isatty;
extern crate jsonrpc_core; extern crate jsonrpc_core;
extern crate num_cpus; extern crate num_cpus;
@ -55,6 +54,7 @@ extern crate ethcore_logger;
extern crate ethcore_rpc; extern crate ethcore_rpc;
extern crate ethcore_signer; extern crate ethcore_signer;
extern crate ethcore_util as util; extern crate ethcore_util as util;
extern crate ethkey;
extern crate ethsync; extern crate ethsync;
extern crate parity_hash_fetch as hash_fetch; extern crate parity_hash_fetch as hash_fetch;
extern crate parity_ipfs_api; extern crate parity_ipfs_api;
@ -74,7 +74,11 @@ extern crate ethcore_stratum;
extern crate ethcore_secretstore; extern crate ethcore_secretstore;
#[cfg(feature = "dapps")] #[cfg(feature = "dapps")]
extern crate ethcore_dapps; extern crate parity_dapps;
#[cfg(test)]
#[macro_use]
extern crate pretty_assertions;
#[cfg(windows)] extern crate ws2_32; #[cfg(windows)] extern crate ws2_32;
#[cfg(windows)] extern crate winapi; #[cfg(windows)] extern crate winapi;

View File

@ -14,24 +14,21 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt; use std::{io, fmt};
use std::sync::Arc; use std::sync::Arc;
use std::net::SocketAddr;
use std::io;
use dapps;
use dir::default_data_path; use dir::default_data_path;
use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host};
use ethcore_rpc::informant::{RpcStats, Middleware}; use ethcore_rpc::informant::{RpcStats, Middleware};
use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host};
use helpers::parity_ipc_path; use helpers::parity_ipc_path;
use hyper;
use jsonrpc_core::MetaIoHandler; use jsonrpc_core::MetaIoHandler;
use rpc_apis;
use rpc_apis::ApiSet;
use parity_reactor::TokioRemote; use parity_reactor::TokioRemote;
use rpc_apis::{self, ApiSet};
pub use ethcore_rpc::{IpcServer, HttpServer}; pub use ethcore_rpc::{IpcServer, HttpServer, RequestMiddleware};
#[derive(Debug, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct HttpConfiguration { pub struct HttpConfiguration {
pub enabled: bool, pub enabled: bool,
pub interface: String, pub interface: String,
@ -39,6 +36,7 @@ pub struct HttpConfiguration {
pub apis: ApiSet, pub apis: ApiSet,
pub cors: Option<Vec<String>>, pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>, pub hosts: Option<Vec<String>>,
pub threads: Option<usize>,
} }
impl Default for HttpConfiguration { impl Default for HttpConfiguration {
@ -50,6 +48,7 @@ impl Default for HttpConfiguration {
apis: ApiSet::UnsafeContext, apis: ApiSet::UnsafeContext,
cors: None, cors: None,
hosts: Some(Vec::new()), hosts: Some(Vec::new()),
threads: None,
} }
} }
} }
@ -89,13 +88,17 @@ pub struct Dependencies<D: rpc_apis::Dependencies> {
} }
pub struct RpcExtractor; pub struct RpcExtractor;
impl rpc::HttpMetaExtractor<Metadata> for RpcExtractor { impl rpc::HttpMetaExtractor for RpcExtractor {
fn read_metadata(&self, req: &hyper::server::Request<hyper::net::HttpStream>) -> Metadata { type Metadata = Metadata;
let origin = req.headers().get::<hyper::header::Origin>()
.map(|origin| format!("{}://{}", origin.scheme, origin.host)) fn read_metadata(&self, origin: String, dapps_origin: Option<String>) -> Metadata {
.unwrap_or_else(|| "unknown".into());
let mut metadata = Metadata::default(); let mut metadata = Metadata::default();
metadata.origin = Origin::Rpc(origin);
metadata.origin = match (origin.as_str(), dapps_origin) {
("null", Some(dapp)) => Origin::Dapps(dapp.into()),
_ => Origin::Rpc(origin),
};
metadata metadata
} }
} }
@ -109,56 +112,101 @@ impl rpc::IpcMetaExtractor<Metadata> for RpcExtractor {
} }
} }
pub fn new_http<D>(conf: HttpConfiguration, deps: &Dependencies<D>) -> Result<Option<HttpServer>, String>
where D: rpc_apis::Dependencies
{
if !conf.enabled {
return Ok(None);
}
let url = format!("{}:{}", conf.interface, conf.port);
let addr = url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))?;
Ok(Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis)?))
}
fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>> fn setup_apis<D>(apis: ApiSet, deps: &Dependencies<D>) -> MetaIoHandler<Metadata, Middleware<D::Notifier>>
where D: rpc_apis::Dependencies where D: rpc_apis::Dependencies
{ {
rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis) rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis)
} }
pub fn setup_http_rpc_server<D: rpc_apis::Dependencies>( pub fn new_http<D: rpc_apis::Dependencies>(
dependencies: &Dependencies<D>, conf: HttpConfiguration,
url: &SocketAddr, deps: &Dependencies<D>,
cors_domains: Option<Vec<String>>, middleware: Option<dapps::Middleware>
allowed_hosts: Option<Vec<String>>, ) -> Result<Option<HttpServer>, String> {
apis: ApiSet if !conf.enabled {
) -> Result<HttpServer, String> { return Ok(None);
let handler = setup_apis(apis, dependencies); }
let remote = dependencies.remote.clone();
let cors_domains: Option<Vec<_>> = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); let url = format!("{}:{}", conf.interface, conf.port);
let allowed_hosts: Option<Vec<_>> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); let addr = url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))?;
let start_result = rpc::start_http(url, cors_domains.into(), allowed_hosts.into(), handler, remote, RpcExtractor); let handler = setup_apis(conf.apis, deps);
let remote = deps.remote.clone();
let cors_domains: Option<Vec<_>> = conf.cors.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect());
let allowed_hosts: Option<Vec<_>> = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect());
let start_result = rpc::start_http(
&addr,
cors_domains.into(),
allowed_hosts.into(),
handler,
remote,
RpcExtractor,
match (conf.threads, middleware) {
(Some(threads), None) => rpc::HttpSettings::Threads(threads),
(None, middleware) => rpc::HttpSettings::Dapps(middleware),
(Some(_), Some(_)) => {
return Err("Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into())
},
}
);
match start_result { match start_result {
Err(HttpServerError::IoError(err)) => match err.kind() { Ok(server) => Ok(Some(server)),
io::ErrorKind::AddrInUse => Err(format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)), Err(HttpServerError::Io(err)) => match err.kind() {
io::ErrorKind::AddrInUse => Err(
format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)
),
_ => Err(format!("RPC io error: {}", err)), _ => Err(format!("RPC io error: {}", err)),
}, },
Err(e) => Err(format!("RPC error: {:?}", e)), Err(e) => Err(format!("RPC error: {:?}", e)),
Ok(server) => Ok(server),
} }
} }
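The backend is chosen from the pair (threads, middleware): a thread count selects the minihttp server, otherwise the hyper server is used with an optional dapps middleware, and requesting both is an error. The same decision written as a hypothetical standalone helper:

    fn select_settings<R: rpc::RequestMiddleware>(
        threads: Option<usize>,
        middleware: Option<R>,
    ) -> Result<rpc::HttpSettings<R>, String> {
        match (threads, middleware) {
            (Some(threads), None) => Ok(rpc::HttpSettings::Threads(threads)),
            (None, middleware) => Ok(rpc::HttpSettings::Dapps(middleware)),
            (Some(_), Some(_)) => Err(
                "Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into()
            ),
        }
    }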
pub fn new_ipc<D: rpc_apis::Dependencies>(conf: IpcConfiguration, deps: &Dependencies<D>) -> Result<Option<IpcServer>, String> { pub fn new_ipc<D: rpc_apis::Dependencies>(
if !conf.enabled { return Ok(None); } conf: IpcConfiguration,
Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) dependencies: &Dependencies<D>
} ) -> Result<Option<IpcServer>, String> {
if !conf.enabled {
pub fn setup_ipc_rpc_server<D: rpc_apis::Dependencies>(dependencies: &Dependencies<D>, addr: &str, apis: ApiSet) -> Result<IpcServer, String> { return Ok(None);
let handler = setup_apis(apis, dependencies); }
let handler = setup_apis(conf.apis, dependencies);
let remote = dependencies.remote.clone(); let remote = dependencies.remote.clone();
match rpc::start_ipc(addr, handler, remote, RpcExtractor) { match rpc::start_ipc(&conf.socket_addr, handler, remote, RpcExtractor) {
Ok(server) => Ok(Some(server)),
Err(io_error) => Err(format!("RPC io error: {}", io_error)), Err(io_error) => Err(format!("RPC io error: {}", io_error)),
Ok(server) => Ok(server) }
}
#[cfg(test)]
mod tests {
use super::RpcExtractor;
use ethcore_rpc::{HttpMetaExtractor, Origin};
#[test]
fn should_extract_rpc_origin() {
// given
let extractor = RpcExtractor;
// when
let meta = extractor.read_metadata("http://parity.io".into(), None);
let meta1 = extractor.read_metadata("http://parity.io".into(), Some("ignored".into()));
// then
assert_eq!(meta.origin, Origin::Rpc("http://parity.io".into()));
assert_eq!(meta1.origin, Origin::Rpc("http://parity.io".into()));
}
#[test]
fn should_dapps_origin() {
// given
let extractor = RpcExtractor;
let dapp = "https://wallet.ethereum.org".to_owned();
// when
let meta = extractor.read_metadata("null".into(), Some(dapp.clone()));
// then
assert_eq!(meta.origin, Origin::Dapps(dapp.into()));
} }
} }

View File

@ -83,7 +83,7 @@ impl FromStr for Api {
} }
} }
#[derive(Debug)] #[derive(Debug, Clone)]
pub enum ApiSet { pub enum ApiSet {
SafeContext, SafeContext,
UnsafeContext, UnsafeContext,

View File

@ -37,7 +37,6 @@ use updater::{UpdatePolicy, Updater};
use parity_reactor::EventLoop; use parity_reactor::EventLoop;
use hash_fetch::fetch::{Fetch, Client as FetchClient}; use hash_fetch::fetch::{Fetch, Client as FetchClient};
use rpc::{HttpConfiguration, IpcConfiguration};
use params::{ use params::{
SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
@ -81,8 +80,8 @@ pub struct RunCmd {
pub daemon: Option<String>, pub daemon: Option<String>,
pub logger_config: LogConfig, pub logger_config: LogConfig,
pub miner_options: MinerOptions, pub miner_options: MinerOptions,
pub http_conf: HttpConfiguration, pub http_conf: rpc::HttpConfiguration,
pub ipc_conf: IpcConfiguration, pub ipc_conf: rpc::IpcConfiguration,
pub net_conf: NetworkConfiguration, pub net_conf: NetworkConfiguration,
pub network_id: Option<u64>, pub network_id: Option<u64>,
pub warp_sync: bool, pub warp_sync: bool,
@ -117,11 +116,7 @@ pub struct RunCmd {
pub light: bool, pub light: bool,
} }
pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { pub fn open_ui(signer_conf: &signer::Configuration) -> Result<(), String> {
if !dapps_conf.enabled {
return Err("Cannot use UI command with Dapps turned off.".into())
}
if !signer_conf.enabled { if !signer_conf.enabled {
return Err("Cannot use UI command with UI turned off.".into()) return Err("Cannot use UI command with UI turned off.".into())
} }
@ -134,12 +129,12 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configur
Ok(()) Ok(())
} }
pub fn open_dapp(dapps_conf: &dapps::Configuration, dapp: &str) -> Result<(), String> { pub fn open_dapp(dapps_conf: &dapps::Configuration, rpc_conf: &rpc::HttpConfiguration, dapp: &str) -> Result<(), String> {
if !dapps_conf.enabled { if !dapps_conf.enabled {
return Err("Cannot use DAPP command with Dapps turned off.".into()) return Err("Cannot use DAPP command with Dapps turned off.".into())
} }
let url = format!("http://{}:{}/{}/", dapps_conf.interface, dapps_conf.port, dapp); let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp);
url::open(&url); url::open(&url);
Ok(()) Ok(())
} }
@ -282,11 +277,11 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
cache: cache, cache: cache,
transaction_queue: txq, transaction_queue: txq,
dapps_interface: match cmd.dapps_conf.enabled { dapps_interface: match cmd.dapps_conf.enabled {
true => Some(cmd.dapps_conf.interface.clone()), true => Some(cmd.http_conf.interface.clone()),
false => None, false => None,
}, },
dapps_port: match cmd.dapps_conf.enabled { dapps_port: match cmd.dapps_conf.enabled {
true => Some(cmd.dapps_conf.port), true => Some(cmd.http_conf.port),
false => None, false => None,
}, },
fetch: fetch, fetch: fetch,
@ -300,7 +295,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
}; };
// start rpc servers // start rpc servers
let _http_server = rpc::new_http(cmd.http_conf, &dependencies)?; let _http_server = rpc::new_http(cmd.http_conf, &dependencies, None)?;
let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
// the signer server // the signer server
@ -329,9 +324,9 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> Result<(bool, Option<String>), String> { pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> Result<(bool, Option<String>), String> {
if cmd.ui && cmd.dapps_conf.enabled { if cmd.ui && cmd.dapps_conf.enabled {
// Check if Parity is already running // Check if Parity is already running
let addr = format!("{}:{}", cmd.dapps_conf.interface, cmd.dapps_conf.port); let addr = format!("{}:{}", cmd.signer_conf.interface, cmd.signer_conf.port);
if !TcpListener::bind(&addr as &str).is_ok() { if !TcpListener::bind(&addr as &str).is_ok() {
return open_ui(&cmd.dapps_conf, &cmd.signer_conf).map(|_| (false, None)); return open_ui(&cmd.signer_conf).map(|_| (false, None));
} }
} }
@ -609,11 +604,11 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
updater: updater.clone(), updater: updater.clone(),
geth_compatibility: cmd.geth_compatibility, geth_compatibility: cmd.geth_compatibility,
dapps_interface: match cmd.dapps_conf.enabled { dapps_interface: match cmd.dapps_conf.enabled {
true => Some(cmd.dapps_conf.interface.clone()), true => Some(cmd.http_conf.interface.clone()),
false => None, false => None,
}, },
dapps_port: match cmd.dapps_conf.enabled { dapps_port: match cmd.dapps_conf.enabled {
true => Some(cmd.dapps_conf.port), true => Some(cmd.http_conf.port),
false => None, false => None,
}, },
fetch: fetch.clone(), fetch: fetch.clone(),
@ -625,26 +620,24 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
stats: rpc_stats.clone(), stats: rpc_stats.clone(),
}; };
// start rpc servers
let http_server = rpc::new_http(cmd.http_conf, &dependencies)?;
let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
// the dapps server // the dapps server
let dapps_deps = { let dapps_deps = {
let (sync, client) = (sync_provider.clone(), client.clone()); let (sync, client) = (sync_provider.clone(), client.clone());
let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() }); let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() });
dapps::Dependencies { dapps::Dependencies {
apis: deps_for_rpc_apis.clone(),
sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())), sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())),
contract_client: contract_client, contract_client: contract_client,
remote: event_loop.raw_remote(), remote: event_loop.raw_remote(),
fetch: fetch.clone(), fetch: fetch.clone(),
signer: deps_for_rpc_apis.signer_service.clone(), signer: deps_for_rpc_apis.signer_service.clone(),
stats: rpc_stats.clone(),
} }
}; };
let dapps_server = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?;
// start rpc servers
let http_server = rpc::new_http(cmd.http_conf.clone(), &dependencies, dapps_middleware)?;
let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?;
// the signer server // the signer server
let signer_deps = signer::Dependencies { let signer_deps = signer::Dependencies {
@ -710,18 +703,18 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
// start ui // start ui
if cmd.ui { if cmd.ui {
open_ui(&cmd.dapps_conf, &cmd.signer_conf)?; open_ui(&cmd.signer_conf)?;
} }
if let Some(dapp) = cmd.dapp { if let Some(dapp) = cmd.dapp {
open_dapp(&cmd.dapps_conf, &dapp)?; open_dapp(&cmd.dapps_conf, &cmd.http_conf, &dapp)?;
} }
// Handle exit // Handle exit
let restart = wait_for_exit(panic_handler, Some(updater), Some(client), can_restart); let restart = wait_for_exit(panic_handler, Some(updater), Some(client), can_restart);
// drop this stuff as soon as exit detected. // drop this stuff as soon as exit detected.
drop((http_server, ipc_server, dapps_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); drop((http_server, ipc_server, signer_server, secretstore_key_server, ipfs_server, event_loop));
info!("Finishing work, please wait..."); info!("Finishing work, please wait...");
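Since dapps are now served from the RPC port, the dapp-opening helper builds its URL from the HTTP RPC configuration instead of a dapps configuration; in sketch form (variable names as in the diff above):

    // URL opened by `open_dapp` after this change.
    let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp);
    url::open(&url);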

View File

@ -53,6 +53,7 @@ mod server {
#[cfg(feature="secretstore")] #[cfg(feature="secretstore")]
mod server { mod server {
use ethkey;
use ethcore_secretstore; use ethcore_secretstore;
use super::{Configuration, Dependencies}; use super::{Configuration, Dependencies};
@ -64,10 +65,35 @@ mod server {
impl KeyServer { impl KeyServer {
/// Create new key server /// Create new key server
pub fn new(conf: Configuration, _deps: Dependencies) -> Result<Self, String> { pub fn new(conf: Configuration, _deps: Dependencies) -> Result<Self, String> {
let key_pairs = vec![
ethkey::KeyPair::from_secret("6c26a76e9b31048d170873a791401c7e799a11f0cefc0171cc31a49800967509".parse().unwrap()).unwrap(),
ethkey::KeyPair::from_secret("7e94018b3731afdb3b4e6f4c3e179475640166da12e1d1b0c7d80729b1a5b452".parse().unwrap()).unwrap(),
ethkey::KeyPair::from_secret("5ab6ed2a52c33142380032c39a03a86b12eacb3fa4b53bc16d84f51318156f8c".parse().unwrap()).unwrap(),
];
let conf = ethcore_secretstore::ServiceConfiguration { let conf = ethcore_secretstore::ServiceConfiguration {
listener_addr: conf.interface, listener_address: ethcore_secretstore::NodeAddress {
listener_port: conf.port, address: conf.interface.clone(),
data_path: conf.data_path, port: conf.port,
},
data_path: conf.data_path.clone(),
// TODO: this is a test configuration. How will it be configured in production?
cluster_config: ethcore_secretstore::ClusterConfiguration {
threads: 4,
self_private: (***key_pairs[(conf.port - 8082) as usize].secret()).into(),
listener_address: ethcore_secretstore::NodeAddress {
address: conf.interface.clone(),
port: conf.port + 10,
},
nodes: key_pairs.iter().enumerate().map(|(i, kp)| (kp.public().clone(),
ethcore_secretstore::NodeAddress {
address: conf.interface.clone(),
port: 8082 + 10 + (i as u16),
})).collect(),
allow_connecting_to_higher_nodes: true,
encryption_config: ethcore_secretstore::EncryptionConfiguration {
key_check_timeout_ms: 1000,
},
}
}; };
let key_server = ethcore_secretstore::start(conf) let key_server = ethcore_secretstore::start(conf)
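The hard-coded test cluster derives everything from the HTTP port: the node picks its own key with index conf.port - 8082, listens for cluster traffic on conf.port + 10, and registers the full node list at ports 8092, 8093 and 8094 on the same interface. For example, with conf.port = 8083 the node uses key_pairs[1] and binds its cluster listener to port 8093.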

View File

@ -21,6 +21,7 @@ transient-hashmap = "0.4"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
jsonrpc-minihttp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }

View File

@ -29,8 +29,9 @@ extern crate time;
extern crate transient_hashmap; extern crate transient_hashmap;
extern crate jsonrpc_core; extern crate jsonrpc_core;
pub extern crate jsonrpc_http_server as http; extern crate jsonrpc_http_server as http;
pub extern crate jsonrpc_ipc_server as ipc; extern crate jsonrpc_minihttp_server as minihttp;
extern crate jsonrpc_ipc_server as ipc;
extern crate ethash; extern crate ethash;
extern crate ethcore; extern crate ethcore;
@ -62,10 +63,15 @@ extern crate ethjson;
#[cfg(test)] #[cfg(test)]
extern crate ethcore_devtools as devtools; extern crate ethcore_devtools as devtools;
mod metadata;
pub mod v1; pub mod v1;
pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext}; pub use ipc::{Server as IpcServer, MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext};
pub use http::{HttpMetaExtractor, Server as HttpServer, Error as HttpServerError, AccessControlAllowOrigin, Host}; pub use http::{
hyper,
RequestMiddleware, RequestMiddlewareAction,
AccessControlAllowOrigin, Host,
};
pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch}; pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings, Metadata, Origin, informant, dispatch};
pub use v1::block_import::is_major_importing; pub use v1::block_import::is_major_importing;
@ -73,26 +79,98 @@ pub use v1::block_import::is_major_importing;
use std::net::SocketAddr; use std::net::SocketAddr;
use http::tokio_core; use http::tokio_core;
/// RPC HTTP Server instance
pub enum HttpServer {
/// Fast MiniHTTP variant
Mini(minihttp::Server),
/// Hyper variant
Hyper(http::Server),
}
/// RPC HTTP Server error
#[derive(Debug)]
pub enum HttpServerError {
/// IO error
Io(::std::io::Error),
/// Other hyper error
Hyper(hyper::Error),
}
impl From<http::Error> for HttpServerError {
fn from(e: http::Error) -> Self {
use self::HttpServerError::*;
match e {
http::Error::Io(io) => Io(io),
http::Error::Other(hyper) => Hyper(hyper),
}
}
}
impl From<minihttp::Error> for HttpServerError {
fn from(e: minihttp::Error) -> Self {
use self::HttpServerError::*;
match e {
minihttp::Error::Io(io) => Io(io),
}
}
}
/// HTTP RPC server impl-independent metadata extractor
pub trait HttpMetaExtractor: Send + Sync + 'static {
/// Type of Metadata
type Metadata: jsonrpc_core::Metadata;
/// Extracts metadata from given params.
fn read_metadata(&self, origin: String, dapps_origin: Option<String>) -> Self::Metadata;
}
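The trait deliberately takes plain strings instead of a hyper request so that a single extractor can serve both backends; the adapters in rpc/src/metadata.rs below do the actual header reading. A minimal sketch of an implementation, mirroring the RpcExtractor earlier in this commit (Metadata and Origin are assumed to be in scope):

    struct ExampleExtractor;

    impl HttpMetaExtractor for ExampleExtractor {
        type Metadata = Metadata;

        fn read_metadata(&self, origin: String, dapps_origin: Option<String>) -> Metadata {
            let mut meta = Metadata::default();
            // A "null" Origin header plus an X-Parity-Origin header marks a dapp request.
            meta.origin = match (origin.as_str(), dapps_origin) {
                ("null", Some(dapp)) => Origin::Dapps(dapp.into()),
                _ => Origin::Rpc(origin),
            };
            meta
        }
    }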
/// HTTP server implementation-specific settings.
pub enum HttpSettings<R: RequestMiddleware> {
/// Enable fast minihttp server with given number of threads.
Threads(usize),
/// Enable standard server with optional dapps middleware.
Dapps(Option<R>),
}
/// Start http server asynchronously and returns result with `Server` handle on success or an error. /// Start http server asynchronously and returns result with `Server` handle on success or an error.
pub fn start_http<M, S, H, T>( pub fn start_http<M, S, H, T, R>(
addr: &SocketAddr, addr: &SocketAddr,
cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>, cors_domains: http::DomainsValidation<http::AccessControlAllowOrigin>,
allowed_hosts: http::DomainsValidation<http::Host>, allowed_hosts: http::DomainsValidation<http::Host>,
handler: H, handler: H,
remote: tokio_core::reactor::Remote, remote: tokio_core::reactor::Remote,
extractor: T, extractor: T,
settings: HttpSettings<R>,
) -> Result<HttpServer, HttpServerError> where ) -> Result<HttpServer, HttpServerError> where
M: jsonrpc_core::Metadata, M: jsonrpc_core::Metadata,
S: jsonrpc_core::Middleware<M>, S: jsonrpc_core::Middleware<M>,
H: Into<jsonrpc_core::MetaIoHandler<M, S>>, H: Into<jsonrpc_core::MetaIoHandler<M, S>>,
T: HttpMetaExtractor<M>, T: HttpMetaExtractor<Metadata=M>,
R: RequestMiddleware,
{ {
http::ServerBuilder::new(handler) Ok(match settings {
HttpSettings::Dapps(middleware) => {
let mut builder = http::ServerBuilder::new(handler)
.event_loop_remote(remote) .event_loop_remote(remote)
.meta_extractor(extractor) .meta_extractor(metadata::HyperMetaExtractor::new(extractor))
.cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into());
if let Some(dapps) = middleware {
builder = builder.request_middleware(dapps)
}
builder.start_http(addr)
.map(HttpServer::Hyper)?
},
HttpSettings::Threads(threads) => {
minihttp::ServerBuilder::new(handler)
.threads(threads)
.meta_extractor(metadata::MiniMetaExtractor::new(extractor))
.cors(cors_domains.into()) .cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into()) .allowed_hosts(allowed_hosts.into())
.start_http(addr) .start_http(addr)
.map(HttpServer::Mini)?
},
})
} }
/// Start ipc server asynchronously and returns result with `Server` handle on success or an error. /// Start ipc server asynchronously and returns result with `Server` handle on success or an error.

74
rpc/src/metadata.rs Normal file
View File

@ -0,0 +1,74 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use jsonrpc_core;
use http;
use hyper;
use minihttp;
use HttpMetaExtractor;
pub struct HyperMetaExtractor<T> {
extractor: T,
}
impl<T> HyperMetaExtractor<T> {
pub fn new(extractor: T) -> Self {
HyperMetaExtractor {
extractor: extractor,
}
}
}
impl<M, T> http::MetaExtractor<M> for HyperMetaExtractor<T> where
T: HttpMetaExtractor<Metadata = M>,
M: jsonrpc_core::Metadata,
{
fn read_metadata(&self, req: &hyper::server::Request<hyper::net::HttpStream>) -> M {
let origin = req.headers().get::<hyper::header::Origin>()
.map(|origin| format!("{}://{}", origin.scheme, origin.host))
.unwrap_or_else(|| "unknown".into());
let dapps_origin = req.headers().get_raw("x-parity-origin")
.and_then(|raw| raw.one())
.map(|raw| String::from_utf8_lossy(raw).into_owned());
self.extractor.read_metadata(origin, dapps_origin)
}
}
pub struct MiniMetaExtractor<T> {
extractor: T,
}
impl<T> MiniMetaExtractor<T> {
pub fn new(extractor: T) -> Self {
MiniMetaExtractor {
extractor: extractor,
}
}
}
impl<M, T> minihttp::MetaExtractor<M> for MiniMetaExtractor<T> where
T: HttpMetaExtractor<Metadata = M>,
M: jsonrpc_core::Metadata,
{
fn read_metadata(&self, req: &minihttp::Req) -> M {
let origin = req.header("origin")
.unwrap_or_else(|| "unknown")
.to_owned();
let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned());
self.extractor.read_metadata(origin, dapps_origin)
}
}


@ -5,7 +5,7 @@ export TARGETS="
-p ethash \
-p ethcore \
-p ethcore-bigint\
-p parity-dapps \
-p ethcore-rpc \
-p ethcore-signer \
-p ethcore-util \


@ -10,9 +10,19 @@ build = "build.rs"
ethcore-ipc-codegen = { path = "../ipc/codegen" }
[dependencies]
byteorder = "1.0"
log = "0.3"
parking_lot = "0.4"
hyper = { version = "0.10", default-features = false }
serde = "0.9"
serde_json = "0.9"
serde_derive = "0.9"
futures = "0.1"
futures-cpupool = "0.1"
rustc-serialize = "0.3"
tokio-core = "0.1"
tokio-service = "0.1"
tokio-proto = "0.1"
url = "1.0"
ethcore-devtools = { path = "../devtools" }
ethcore-util = { path = "../util" }


@ -14,7 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use hyper::header;
use hyper::uri::RequestUri;
@ -39,7 +38,9 @@ pub struct KeyServerHttpListener<T: KeyServer + 'static> {
enum Request {
/// Invalid request
Invalid,
/// Generate encryption key.
GenerateDocumentKey(DocumentAddress, RequestSignature, usize),
/// Request encryption key of given document for given requestor.
GetDocumentKey(DocumentAddress, RequestSignature),
}
@ -63,9 +64,9 @@ impl<T> KeyServerHttpListener<T> where T: KeyServer + 'static {
handler: shared_handler.clone(),
};
let listener_addr: &str = &format!("{}:{}", config.listener_address.address, config.listener_address.port);
let http_server = HttpServer::http(&listener_addr).expect("cannot start HttpServer");
let http_server = http_server.handle(handler).expect("cannot start HttpServer");
let listener = KeyServerHttpListener {
_http_server: http_server,
handler: shared_handler,
@ -75,6 +76,10 @@ impl<T> KeyServerHttpListener<T> where T: KeyServer + 'static {
}
impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error> {
self.handler.key_server.generate_document_key(signature, document, threshold)
}
fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error> {
self.handler.key_server.document_key(signature, document)
}
@ -82,33 +87,51 @@ impl<T> KeyServer for KeyServerHttpListener<T> where T: KeyServer + 'static {
impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
fn handle(&self, req: HttpRequest, mut res: HttpResponse) {
if req.headers.has::<header::Origin>() {
warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri);
*res.status_mut() = HttpStatusCode::NotFound;
return;
}
let req_method = req.method.clone();
let req_uri = req.uri.clone();
match &req_uri {
&RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path) {
Request::GenerateDocumentKey(document, signature, threshold) => {
return_document_key(req, res, self.handler.key_server.generate_document_key(&signature, &document, threshold)
.map_err(|err| {
warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err);
err
}));
},
Request::GetDocumentKey(document, signature) => {
return_document_key(req, res, self.handler.key_server.document_key(&signature, &document)
.map_err(|err| {
warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err);
err
}));
},
Request::Invalid => {
warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
*res.status_mut() = HttpStatusCode::BadRequest;
},
},
_ => {
warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri);
*res.status_mut() = HttpStatusCode::NotFound;
},
};
}
}
fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Result<DocumentEncryptedKey, Error>) {
match document_key {
Ok(document_key) => {
let document_key = document_key.to_hex().into_bytes();
res.headers_mut().set(header::ContentType::plaintext());
if let Err(err) = res.send(&document_key) {
// nothing to do, but to log an error
warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err);
}
},
Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest,
@ -117,60 +140,50 @@ impl<T> HttpHandler for KeyServerHttpHandler<T> where T: KeyServer + 'static {
Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError,
Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError,
}
}
fn parse_request(method: &HttpMethod, uri_path: &str) -> Request {
let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() {
Ok(path) => path,
Err(_) => return Request::Invalid,
};
let path: Vec<String> = uri_path.trim_left_matches('/').split('/').map(Into::into).collect();
if path.len() < 2 || path[0].is_empty() || path[1].is_empty() {
return Request::Invalid;
}
let args_len = path.len();
let document = path[0].parse();
let signature = path[1].parse();
let threshold = (if args_len > 2 { &path[2] } else { "" }).parse();
match (args_len, method, document, signature, threshold) {
(3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold),
(2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature),
_ => Request::Invalid,
}
}
#[cfg(test)]
mod tests {
use hyper::method::Method as HttpMethod;
use super::{parse_request, Request};
#[test]
fn parse_request_successful() {
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"),
Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"),
Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(),
"a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap()));
}
#[test]
fn parse_request_failed() {
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/a/b"), Request::Invalid);
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid);
}
}


@ -14,42 +14,78 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::sync::Arc;
use std::sync::mpsc;
use futures::{self, Future};
use parking_lot::Mutex;
use tokio_core::reactor::Core;
use ethcrypto;
use ethkey;
use super::acl_storage::AclStorage;
use super::key_storage::KeyStorage;
use key_server_cluster::ClusterCore;
use traits::KeyServer;
use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, ClusterConfiguration};
use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration};
/// Secret store key server implementation
pub struct KeyServerImpl {
data: Arc<Mutex<KeyServerCore>>,
}
/// Secret store key server data.
pub struct KeyServerCore {
close: Option<futures::Complete<()>>,
handle: Option<thread::JoinHandle<()>>,
cluster: Option<Arc<ClusterClient>>,
}
impl KeyServerImpl {
/// Create new key server instance
pub fn new(config: &ClusterConfiguration, acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>) -> Result<Self, Error> {
Ok(KeyServerImpl {
data: Arc::new(Mutex::new(KeyServerCore::new(config, acl_storage, key_storage)?)),
})
}
#[cfg(test)]
/// Get cluster client reference.
pub fn cluster(&self) -> Arc<ClusterClient> {
self.data.lock().cluster.clone()
.expect("cluster can be None in test cfg only; test cfg is for correct tests; qed")
}
}
impl KeyServer for KeyServerImpl {
fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error> {
// recover requestor's public key from signature
let public = ethkey::recover(signature, document)
.map_err(|_| Error::BadSignature)?;
// generate document key
let data = self.data.lock();
let encryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed")
.new_encryption_session(document.clone(), threshold)?;
let document_key = encryption_session.wait()?;
// encrypt document key with requestor public key
let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key)
.map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?;
Ok(document_key)
}
fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error> {
// recover requestor's public key from signature
let public = ethkey::recover(signature, document)
.map_err(|_| Error::BadSignature)?;
// decrypt document key
let data = self.data.lock();
let decryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed")
.new_decryption_session(document.clone(), signature.clone())?;
let document_key = decryption_session.wait()?;
// encrypt document key with requestor public key
let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key)
.map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?;
@ -57,68 +93,132 @@ impl<T, U> KeyServer for KeyServerImpl<T, U> where T: AclStorage, U: KeyStorage
}
}
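For orientation, the client-side flow against this trait looks roughly like the sketch below (it mirrors the test at the end of the file; `key_server`, `secret`, `document`, and `threshold` are assumed values, not part of this change):
// the requestor proves its identity by signing the document address with its secret key
let signature = ethkey::sign(&secret, &document).unwrap();
// both calls return the document key ECIES-encrypted to the public key recovered from `signature`
let encrypted = key_server.generate_document_key(&signature, &document, threshold).unwrap();
let document_key = ethcrypto::ecies::decrypt_single_message(&secret, &encrypted).unwrap();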
impl KeyServerCore {
pub fn new(config: &ClusterConfiguration, acl_storage: Arc<AclStorage>, key_storage: Arc<KeyStorage>) -> Result<Self, Error> {
let config = NetClusterConfiguration {
threads: config.threads,
self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?,
listen_address: (config.listener_address.address.clone(), config.listener_address.port),
nodes: config.nodes.iter()
.map(|(node_id, node_address)| (node_id.clone(), (node_address.address.clone(), node_address.port)))
.collect(),
allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes,
encryption_config: config.encryption_config.clone(),
acl_storage: acl_storage,
key_storage: key_storage,
};
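// the cluster runs on a dedicated event-loop thread: the oneshot (`stop`/`stopped`) shuts the
// loop down when `KeyServerCore` is dropped, and the mpsc channel hands the constructed cluster
// client (or an initialization error) back to the calling thread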
let (stop, stopped) = futures::oneshot();
let (tx, rx) = mpsc::channel();
let handle = thread::spawn(move || {
let mut el = match Core::new() {
Ok(el) => el,
Err(e) => {
tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread.");
return;
},
};
let cluster = ClusterCore::new(el.handle(), config);
let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client()));
tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread.");
let _ = el.run(futures::empty().select(stopped));
});
let cluster = rx.recv().map_err(|e| Error::Internal(format!("error initializing event loop: {}", e)))??;
Ok(KeyServerCore {
close: Some(stop),
handle: Some(handle),
cluster: Some(cluster),
})
}
}
impl Drop for KeyServerCore {
fn drop(&mut self) {
self.close.take().map(|v| v.send(()));
self.handle.take().map(|h| h.join());
}
}
#[cfg(test)]
mod tests {
use std::time;
use std::sync::Arc;
use ethcrypto;
use ethkey::{self, Random, Generator};
use acl_storage::DummyAclStorage;
use key_storage::tests::DummyKeyStorage;
use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration, DocumentEncryptedKey, DocumentKey};
use super::super::{RequestSignature, DocumentAddress};
use super::{KeyServer, KeyServerImpl};
const DOCUMENT1: &'static str = "0000000000000000000000000000000000000000000000000000000000000001";
const PRIVATE1: &'static str = "03055e18a8434dcc9061cc1b81c4ef84dc7cf4574d755e52cdcf0c8898b25b11";
fn make_signature(secret: &str, document: &'static str) -> RequestSignature {
let secret = secret.parse().unwrap();
let document: DocumentAddress = document.into();
ethkey::sign(&secret, &document).unwrap()
}
fn decrypt_document_key(secret: &str, document_key: DocumentEncryptedKey) -> DocumentKey {
let secret = secret.parse().unwrap();
ethcrypto::ecies::decrypt_single_message(&secret, &document_key).unwrap()
}
#[test]
fn document_key_generation_and_retrievement_works_over_network() {
//::util::log::init_log();
let num_nodes = 3;
let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
threads: 1,
self_private: (***key_pairs[i].secret()).into(),
listener_address: NodeAddress {
address: "127.0.0.1".into(),
port: 6060 + (i as u16),
},
nodes: key_pairs.iter().enumerate().map(|(j, kp)| (kp.public().clone(),
NodeAddress {
address: "127.0.0.1".into(),
port: 6060 + (j as u16),
})).collect(),
allow_connecting_to_higher_nodes: false,
encryption_config: EncryptionConfiguration {
key_check_timeout_ms: 10,
},
}).collect();
let key_servers: Vec<_> = configs.into_iter().map(|cfg|
KeyServerImpl::new(&cfg, Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap()
).collect();
// wait until connections are established
let start = time::Instant::now();
loop {
if key_servers.iter().all(|ks| ks.cluster().cluster_state().connected.len() == num_nodes - 1) {
break;
}
if time::Instant::now() - start > time::Duration::from_millis(30000) {
panic!("connections are not established in 30000ms");
}
}
let test_cases = [0, 1, 2];
for threshold in &test_cases {
// generate document key
// TODO: it is an error that we can regenerate key for the same DOCUMENT
let signature = make_signature(PRIVATE1, DOCUMENT1);
let generated_key = key_servers[0].generate_document_key(&signature, &DOCUMENT1.into(), *threshold).unwrap();
let generated_key = decrypt_document_key(PRIVATE1, generated_key);
// now let's try to retrieve key back
for key_server in key_servers.iter() {
let retrieved_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap();
let retrieved_key = decrypt_document_key(PRIVATE1, retrieved_key);
assert_eq!(retrieved_key, generated_key);
}
}
}
}


@ -14,11 +14,42 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::time;
use std::sync::Arc;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::collections::btree_map::Entry;
use std::net::{SocketAddr, IpAddr};
use futures::{finished, failed, Future, Stream, BoxFuture};
use futures_cpupool::CpuPool;
use parking_lot::{RwLock, Mutex};
use tokio_core::io::IoFuture;
use tokio_core::reactor::{Handle, Remote, Timeout, Interval};
use tokio_core::net::{TcpListener, TcpStream};
use ethkey::{Secret, KeyPair, Signature, Random, Generator};
use key_server_cluster::{Error, NodeId, SessionId, EncryptionConfiguration, AclStorage, KeyStorage};
use key_server_cluster::message::{self, Message, ClusterMessage, EncryptionMessage, DecryptionMessage};
use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, DecryptionSessionId,
SessionParams as DecryptionSessionParams, Session as DecryptionSession};
use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState,
SessionParams as EncryptionSessionParams, Session as EncryptionSession};
use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message};
use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection};
pub type BoxedEmptyFuture = BoxFuture<(), ()>;
/// Cluster interface for external clients.
pub trait ClusterClient: Send + Sync {
/// Get cluster state.
fn cluster_state(&self) -> ClusterState;
/// Start new encryption session.
fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result<Arc<EncryptionSession>, Error>;
/// Start new decryption session.
fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result<Arc<DecryptionSession>, Error>;
}
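A rough sketch of driving both session types from a `ClusterClient` handle; `client`, `document` (a `SessionId`), and `requestor_signature` are assumed to exist in the caller's scope:
// distributed key generation with threshold 1, then threshold decryption of the same key
let encryption = client.new_encryption_session(document.clone(), 1)?;
let generated_key = encryption.wait()?;
let decryption = client.new_decryption_session(document, requestor_signature)?;
let recovered_key = decryption.wait()?;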
/// Cluster access for single encryption/decryption participant.
pub trait Cluster: Send + Sync {
/// Broadcast message to all other nodes.
fn broadcast(&self, message: Message) -> Result<(), Error>;
/// Send message to given node.
@ -27,13 +58,841 @@ pub trait Cluster {
fn blacklist(&self, node: &NodeId);
}
#[derive(Clone)]
/// Cluster initialization parameters.
pub struct ClusterConfiguration {
/// Number of threads reserved by cluster.
pub threads: usize,
/// Allow connecting to 'higher' nodes.
pub allow_connecting_to_higher_nodes: bool,
/// KeyPair this node holds.
pub self_key_pair: KeyPair,
/// Interface to listen to.
pub listen_address: (String, u16),
/// Cluster nodes.
pub nodes: BTreeMap<NodeId, (String, u16)>,
/// Encryption session configuration.
pub encryption_config: EncryptionConfiguration,
/// Reference to key storage
pub key_storage: Arc<KeyStorage>,
/// Reference to ACL storage
pub acl_storage: Arc<AclStorage>,
}
/// Cluster state.
pub struct ClusterState {
/// Nodes, to which connections are established.
pub connected: BTreeSet<NodeId>,
}
/// Network cluster implementation.
pub struct ClusterCore {
/// Handle to the event loop.
handle: Handle,
/// Listen address.
listen_address: SocketAddr,
/// Cluster data.
data: Arc<ClusterData>,
}
/// Network cluster client interface implementation.
pub struct ClusterClientImpl {
/// Cluster data.
data: Arc<ClusterData>,
}
/// Network cluster view. It is a communication channel, required in single session.
pub struct ClusterView {
core: Arc<Mutex<ClusterViewCore>>,
}
/// Cross-thread shareable cluster data.
pub struct ClusterData {
/// Cluster configuration.
config: ClusterConfiguration,
/// Handle to the event loop.
handle: Remote,
/// Handle to the cpu thread pool.
pool: CpuPool,
/// KeyPair this node holds.
self_key_pair: KeyPair,
/// Connections data.
connections: ClusterConnections,
/// Active sessions data.
sessions: ClusterSessions,
}
/// Connections that are forming the cluster.
pub struct ClusterConnections {
/// Self node id.
pub self_node_id: NodeId,
/// All known other key servers.
pub nodes: BTreeMap<NodeId, SocketAddr>,
/// Active connections to key servers.
pub connections: RwLock<BTreeMap<NodeId, Arc<Connection>>>,
}
/// Active sessions on this cluster.
pub struct ClusterSessions {
/// Self node id.
pub self_node_id: NodeId,
/// Reference to key storage
pub key_storage: Arc<KeyStorage>,
/// Reference to ACL storage
pub acl_storage: Arc<AclStorage>,
/// Active encryption sessions.
pub encryption_sessions: RwLock<BTreeMap<SessionId, QueuedEncryptionSession>>,
/// Active decryption sessions.
pub decryption_sessions: RwLock<BTreeMap<DecryptionSessionId, QueuedDecryptionSession>>,
}
/// Encryption session and its message queue.
pub struct QueuedEncryptionSession {
/// Encryption session.
pub session: Arc<EncryptionSessionImpl>,
/// Messages queue.
pub queue: VecDeque<(NodeId, EncryptionMessage)>,
}
/// Decryption session and its message queue.
pub struct QueuedDecryptionSession {
/// Decryption session.
pub session: Arc<DecryptionSessionImpl>,
/// Messages queue.
pub queue: VecDeque<(NodeId, DecryptionMessage)>,
}
/// Cluster view core.
struct ClusterViewCore {
/// Cluster reference.
cluster: Arc<ClusterData>,
/// Subset of nodes, required for this session.
nodes: BTreeSet<NodeId>,
}
/// Connection to single node.
pub struct Connection {
/// Node id.
node_id: NodeId,
/// Node address.
node_address: SocketAddr,
/// Is inbound connection?
is_inbound: bool,
/// Tcp stream.
stream: SharedTcpStream,
/// Connection key.
key: Secret,
/// Last message time.
last_message_time: Mutex<time::Instant>,
}
impl ClusterCore {
pub fn new(handle: Handle, config: ClusterConfiguration) -> Result<Arc<Self>, Error> {
let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?;
let connections = ClusterConnections::new(&config)?;
let sessions = ClusterSessions::new(&config);
let data = ClusterData::new(&handle, config, connections, sessions);
Ok(Arc::new(ClusterCore {
handle: handle,
listen_address: listen_address,
data: data,
}))
}
/// Create new client interface.
pub fn client(&self) -> Arc<ClusterClient> {
Arc::new(ClusterClientImpl::new(self.data.clone()))
}
#[cfg(test)]
/// Get cluster configuration.
pub fn config(&self) -> &ClusterConfiguration {
&self.data.config
}
#[cfg(test)]
/// Get connection to given node.
pub fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
self.data.connection(node)
}
/// Run cluster
pub fn run(&self) -> Result<(), Error> {
// try to connect to every other peer
ClusterCore::connect_disconnected_nodes(self.data.clone());
// schedule maintain procedures
ClusterCore::schedule_maintain(&self.handle, self.data.clone());
// start listening for incoming connections
self.handle.spawn(ClusterCore::listen(&self.handle, self.data.clone(), self.listen_address.clone())?);
Ok(())
}
/// Connect to peer.
fn connect(data: Arc<ClusterData>, node_address: SocketAddr) {
data.handle.clone().spawn(move |handle| {
data.pool.clone().spawn(ClusterCore::connect_future(handle, data, node_address))
})
}
/// Connect to socket using given context and handle.
fn connect_future(handle: &Handle, data: Arc<ClusterData>, node_address: SocketAddr) -> BoxedEmptyFuture {
let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes)
.then(move |result| ClusterCore::process_connection_result(data, false, result))
.then(|_| finished(()))
.boxed()
}
/// Start listening for incoming connections.
fn listen(handle: &Handle, data: Arc<ClusterData>, listen_address: SocketAddr) -> Result<BoxedEmptyFuture, Error> {
Ok(TcpListener::bind(&listen_address, &handle)?
.incoming()
.and_then(move |(stream, node_address)| {
ClusterCore::accept_connection(data.clone(), stream, node_address);
Ok(())
})
.for_each(|_| Ok(()))
.then(|_| finished(()))
.boxed())
}
/// Accept connection.
fn accept_connection(data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) {
data.handle.clone().spawn(move |handle| {
data.pool.clone().spawn(ClusterCore::accept_connection_future(handle, data, stream, node_address))
})
}
/// Accept connection future.
fn accept_connection_future(handle: &Handle, data: Arc<ClusterData>, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture {
let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect();
net_accept_connection(node_address, stream, handle, data.self_key_pair.clone(), disconnected_nodes)
.then(move |result| ClusterCore::process_connection_result(data, true, result))
.then(|_| finished(()))
.boxed()
}
/// Schedule maintain procedures.
fn schedule_maintain(handle: &Handle, data: Arc<ClusterData>) {
// TODO: per-session timeouts (node can respond to messages, but ignore sessions messages)
let (d1, d2, d3) = (data.clone(), data.clone(), data.clone());
let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(10, 0), handle)
.expect("failed to create interval")
.and_then(move |_| Ok(trace!(target: "secretstore_net", "{}: executing maintain procedures", d1.self_key_pair.public())))
.and_then(move |_| Ok(ClusterCore::keep_alive(d2.clone())))
.and_then(move |_| Ok(ClusterCore::connect_disconnected_nodes(d3.clone())))
.for_each(|_| Ok(()))
.then(|_| finished(()))
.boxed();
data.spawn(interval);
}
/// Called for every incoming message.
fn process_connection_messages(data: Arc<ClusterData>, connection: Arc<Connection>) -> IoFuture<Result<(), Error>> {
connection
.read_message()
.then(move |result|
match result {
Ok((_, Ok(message))) => {
ClusterCore::process_connection_message(data.clone(), connection.clone(), message);
// continue serving connection
data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
finished(Ok(())).boxed()
},
Ok((_, Err(err))) => {
warn!(target: "secretstore_net", "{}: protocol error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
// continue serving connection
data.spawn(ClusterCore::process_connection_messages(data.clone(), connection));
finished(Err(err)).boxed()
},
Err(err) => {
warn!(target: "secretstore_net", "{}: network error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id());
// close connection
data.connections.remove(connection.node_id(), connection.is_inbound());
failed(err).boxed()
},
}
).boxed()
}
/// Send keepalive messages to every other node.
fn keep_alive(data: Arc<ClusterData>) {
for connection in data.connections.active_connections() {
let last_message_diff = time::Instant::now() - connection.last_message_time();
if last_message_diff > time::Duration::from_secs(60) {
data.connections.remove(connection.node_id(), connection.is_inbound());
data.sessions.on_connection_timeout(connection.node_id());
}
else if last_message_diff > time::Duration::from_secs(30) {
data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {}))));
}
}
}
/// Try to connect to every disconnected node.
fn connect_disconnected_nodes(data: Arc<ClusterData>) {
for (node_id, node_address) in data.connections.disconnected_nodes() {
if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id {
ClusterCore::connect(data.clone(), node_address);
}
}
}
/// Process connection future result.
fn process_connection_result(data: Arc<ClusterData>, is_inbound: bool, result: Result<DeadlineStatus<Result<NetConnection, Error>>, io::Error>) -> IoFuture<Result<(), Error>> {
match result {
Ok(DeadlineStatus::Meet(Ok(connection))) => {
let connection = Connection::new(is_inbound, connection);
if data.connections.insert(connection.clone()) {
ClusterCore::process_connection_messages(data.clone(), connection)
} else {
finished(Ok(())).boxed()
}
},
Ok(DeadlineStatus::Meet(Err(_))) => {
finished(Ok(())).boxed()
},
Ok(DeadlineStatus::Timeout) => {
finished(Ok(())).boxed()
},
Err(_) => {
// network error
finished(Ok(())).boxed()
},
}
}
/// Process single message from the connection.
fn process_connection_message(data: Arc<ClusterData>, connection: Arc<Connection>, message: Message) {
connection.set_last_message_time(time::Instant::now());
trace!(target: "secretstore_net", "{}: processing message {} from {}", data.self_key_pair.public(), message, connection.node_id());
match message {
Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message),
Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message),
Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message),
}
}
/// Process single encryption message from the connection.
fn process_encryption_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: EncryptionMessage) {
let mut sender = connection.node_id().clone();
let mut is_queued_message = false;
let session_id = message.session_id().clone();
let key_check_timeout_ms = data.config.encryption_config.key_check_timeout_ms;
loop {
let result = match message {
EncryptionMessage::InitializeSession(ref message) => {
let mut connected_nodes = data.connections.connected_nodes();
connected_nodes.insert(data.self_key_pair.public().clone());
let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
let session_id: SessionId = message.session.clone().into();
data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster)
.and_then(|s| s.on_initialize_session(sender.clone(), message))
},
EncryptionMessage::ConfirmInitialization(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_confirm_initialization(sender.clone(), message)),
EncryptionMessage::CompleteInitialization(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_complete_initialization(sender.clone(), message)),
EncryptionMessage::KeysDissemination(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| {
// TODO: move this logic to session (or session connector)
let is_in_key_check_state = s.state() == EncryptionSessionState::KeyCheck;
let result = s.on_keys_dissemination(sender.clone(), message);
if !is_in_key_check_state && s.state() == EncryptionSessionState::KeyCheck {
let session = s.clone();
let d = data.clone();
data.handle.spawn(move |handle|
Timeout::new(time::Duration::new(key_check_timeout_ms / 1000, 0), handle)
.expect("failed to create timeout")
.and_then(move |_| {
if let Err(error) = session.start_key_generation_phase() {
session.on_session_error(d.self_key_pair.public().clone(), &message::SessionError {
session: session.id().clone().into(),
error: error.into(),
});
}
Ok(())
})
.then(|_| finished(()))
);
}
result
}),
EncryptionMessage::Complaint(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_complaint(sender.clone(), message)),
EncryptionMessage::ComplaintResponse(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_complaint_response(sender.clone(), message)),
EncryptionMessage::PublicKeyShare(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_public_key_share(sender.clone(), message)),
EncryptionMessage::SessionError(ref message) => {
if let Some(s) = data.sessions.encryption_session(&*message.session) {
data.sessions.remove_encryption_session(s.id());
s.on_session_error(sender.clone(), message);
}
Ok(())
},
EncryptionMessage::SessionCompleted(ref message) => data.sessions.encryption_session(&*message.session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| {
let result = s.on_session_completed(sender.clone(), message);
if result.is_ok() && s.state() == EncryptionSessionState::Finished {
data.sessions.remove_encryption_session(s.id());
}
result
}),
};
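// three outcomes: the session is not ready for this message yet (requeue it), processing
// failed (report the error to the sender and, unless the session id was invalid, drop the
// session), or it succeeded (drain any messages queued for this session)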
match result {
Err(Error::TooEarlyForRequest) => {
data.sessions.enqueue_encryption_message(&session_id, sender, message, is_queued_message);
break;
},
Err(err) => {
warn!(target: "secretstore_net", "{}: error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender);
if let Some(connection) = data.connections.get(&sender) {
data.spawn(connection.send_message(Message::Encryption(EncryptionMessage::SessionError(message::SessionError {
session: session_id.clone().into(),
error: format!("{:?}", err),
}))));
}
if err != Error::InvalidSessionId {
data.sessions.remove_encryption_session(&session_id);
}
break;
},
_ => {
match data.sessions.dequeue_encryption_message(&session_id) {
Some((msg_sender, msg)) => {
is_queued_message = true;
sender = msg_sender;
message = msg;
},
None => break,
}
},
}
}
}
/// Process single decryption message from the connection.
fn process_decryption_message(data: Arc<ClusterData>, connection: Arc<Connection>, mut message: DecryptionMessage) {
let mut sender = connection.node_id().clone();
let mut is_queued_message = false;
let session_id = message.session_id().clone();
let sub_session_id = message.sub_session_id().clone();
loop {
let result = match message {
DecryptionMessage::InitializeDecryptionSession(ref message) => {
let mut connected_nodes = data.connections.connected_nodes();
connected_nodes.insert(data.self_key_pair.public().clone());
let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes));
data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster)
.and_then(|s| s.on_initialize_session(sender.clone(), message))
},
DecryptionMessage::ConfirmDecryptionInitialization(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_confirm_initialization(sender.clone(), message)),
DecryptionMessage::RequestPartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_partial_decryption_requested(sender.clone(), message)),
DecryptionMessage::PartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session)
.ok_or(Error::InvalidSessionId)
.and_then(|s| s.on_partial_decryption(sender.clone(), message)),
DecryptionMessage::DecryptionSessionError(ref message) => {
if let Some(s) = data.sessions.decryption_session(&*message.session, &*message.sub_session) {
data.sessions.remove_decryption_session(&session_id, &sub_session_id);
s.on_session_error(sender.clone(), message);
}
Ok(())
},
};
match result {
Err(Error::TooEarlyForRequest) => {
data.sessions.enqueue_decryption_message(&session_id, &sub_session_id, sender, message, is_queued_message);
break;
},
Err(err) => {
if let Some(connection) = data.connections.get(&sender) {
data.spawn(connection.send_message(Message::Decryption(DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError {
session: session_id.clone().into(),
sub_session: sub_session_id.clone().into(),
error: format!("{:?}", err),
}))));
}
if err != Error::InvalidSessionId {
data.sessions.remove_decryption_session(&session_id, &sub_session_id);
}
break;
},
_ => {
match data.sessions.dequeue_decryption_message(&session_id, &sub_session_id) {
Some((msg_sender, msg)) => {
is_queued_message = true;
sender = msg_sender;
message = msg;
},
None => break,
}
},
}
}
}
/// Process single cluster message from the connection.
fn process_cluster_message(data: Arc<ClusterData>, connection: Arc<Connection>, message: ClusterMessage) {
match message {
ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {})))),
ClusterMessage::KeepAliveResponse(_) => (),
_ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()),
}
}
}
impl ClusterConnections {
pub fn new(config: &ClusterConfiguration) -> Result<Self, Error> {
let mut connections = ClusterConnections {
self_node_id: config.self_key_pair.public().clone(),
nodes: BTreeMap::new(),
connections: RwLock::new(BTreeMap::new()),
};
for (node_id, &(ref node_addr, node_port)) in config.nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) {
let socket_address = make_socket_address(&node_addr, node_port)?;
connections.nodes.insert(node_id.clone(), socket_address);
}
Ok(connections)
}
pub fn cluster_state(&self) -> ClusterState {
ClusterState {
connected: self.connections.read().keys().cloned().collect(),
}
}
pub fn get(&self, node: &NodeId) -> Option<Arc<Connection>> {
self.connections.read().get(node).cloned()
}
pub fn insert(&self, connection: Arc<Connection>) -> bool {
let mut connections = self.connections.write();
if connections.contains_key(connection.node_id()) {
// we have already connected to the same node
// the agreement is that node with lower id must establish connection to node with higher id
if (&self.self_node_id < connection.node_id() && connection.is_inbound())
|| (&self.self_node_id > connection.node_id() && !connection.is_inbound()) {
return false;
}
}
trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address());
connections.insert(connection.node_id().clone(), connection);
true
}
pub fn remove(&self, node: &NodeId, is_inbound: bool) {
let mut connections = self.connections.write();
if let Entry::Occupied(entry) = connections.entry(node.clone()) {
if entry.get().is_inbound() != is_inbound {
return;
}
trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address());
entry.remove_entry();
}
}
pub fn connected_nodes(&self) -> BTreeSet<NodeId> {
self.connections.read().keys().cloned().collect()
}
pub fn active_connections(&self) -> Vec<Arc<Connection>> {
self.connections.read().values().cloned().collect()
}
pub fn disconnected_nodes(&self) -> BTreeMap<NodeId, SocketAddr> {
let connections = self.connections.read();
self.nodes.iter()
.filter(|&(node_id, _)| !connections.contains_key(node_id))
.map(|(node_id, node_address)| (node_id.clone(), node_address.clone()))
.collect()
}
}
impl ClusterSessions {
pub fn new(config: &ClusterConfiguration) -> Self {
ClusterSessions {
self_node_id: config.self_key_pair.public().clone(),
acl_storage: config.acl_storage.clone(),
key_storage: config.key_storage.clone(),
encryption_sessions: RwLock::new(BTreeMap::new()),
decryption_sessions: RwLock::new(BTreeMap::new()),
}
}
pub fn new_encryption_session(&self, _master: NodeId, session_id: SessionId, cluster: Arc<Cluster>) -> Result<Arc<EncryptionSessionImpl>, Error> {
let mut encryption_sessions = self.encryption_sessions.write();
if encryption_sessions.contains_key(&session_id) {
return Err(Error::DuplicateSessionId);
}
let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams {
id: session_id.clone(),
self_node_id: self.self_node_id.clone(),
key_storage: self.key_storage.clone(),
cluster: cluster,
}));
let encryption_session = QueuedEncryptionSession {
session: session.clone(),
queue: VecDeque::new()
};
encryption_sessions.insert(session_id, encryption_session);
Ok(session)
}
pub fn remove_encryption_session(&self, session_id: &SessionId) {
self.encryption_sessions.write().remove(session_id);
}
pub fn encryption_session(&self, session_id: &SessionId) -> Option<Arc<EncryptionSessionImpl>> {
self.encryption_sessions.read().get(session_id).map(|s| s.session.clone())
}
pub fn enqueue_encryption_message(&self, session_id: &SessionId, sender: NodeId, message: EncryptionMessage, is_queued_message: bool) {
self.encryption_sessions.write().get_mut(session_id)
.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
else { session.queue.push_back((sender, message)) });
}
pub fn dequeue_encryption_message(&self, session_id: &SessionId) -> Option<(NodeId, EncryptionMessage)> {
self.encryption_sessions.write().get_mut(session_id)
.and_then(|session| session.queue.pop_front())
}
pub fn new_decryption_session(&self, _master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc<Cluster>) -> Result<Arc<DecryptionSessionImpl>, Error> {
let mut decryption_sessions = self.decryption_sessions.write();
let session_id = DecryptionSessionId::new(session_id, sub_session_id);
if decryption_sessions.contains_key(&session_id) {
return Err(Error::DuplicateSessionId);
}
let session = Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams {
id: session_id.id.clone(),
access_key: session_id.access_key.clone(),
self_node_id: self.self_node_id.clone(),
encrypted_data: self.key_storage.get(&session_id.id).map_err(|e| Error::KeyStorage(e.into()))?,
acl_storage: self.acl_storage.clone(),
cluster: cluster,
})?);
let decryption_session = QueuedDecryptionSession {
session: session.clone(),
queue: VecDeque::new()
};
decryption_sessions.insert(session_id, decryption_session);
Ok(session)
}
pub fn remove_decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) {
let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
self.decryption_sessions.write().remove(&session_id);
}
pub fn decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<Arc<DecryptionSessionImpl>> {
let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
self.decryption_sessions.read().get(&session_id).map(|s| s.session.clone())
}
pub fn enqueue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret, sender: NodeId, message: DecryptionMessage, is_queued_message: bool) {
let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
self.decryption_sessions.write().get_mut(&session_id)
.map(|session| if is_queued_message { session.queue.push_front((sender, message)) }
else { session.queue.push_back((sender, message)) });
}
pub fn dequeue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<(NodeId, DecryptionMessage)> {
let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone());
self.decryption_sessions.write().get_mut(&session_id)
.and_then(|session| session.queue.pop_front())
}
pub fn on_connection_timeout(&self, node_id: &NodeId) {
for encryption_session in self.encryption_sessions.read().values() {
encryption_session.session.on_session_timeout(node_id);
}
for decryption_session in self.decryption_sessions.read().values() {
decryption_session.session.on_session_timeout(node_id);
}
}
}
impl ClusterData {
pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc<Self> {
Arc::new(ClusterData {
handle: handle.remote().clone(),
pool: CpuPool::new(config.threads),
self_key_pair: config.self_key_pair.clone(),
connections: connections,
sessions: sessions,
config: config,
})
}
/// Get connection to given node.
pub fn connection(&self, node: &NodeId) -> Option<Arc<Connection>> {
self.connections.get(node)
}
/// Spawns a future using thread pool and schedules execution of it with event loop handle.
pub fn spawn<F>(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static {
let pool_work = self.pool.spawn(f);
self.handle.spawn(move |_handle| {
pool_work.then(|_| finished(()))
})
}
}
impl Connection {
pub fn new(is_inbound: bool, connection: NetConnection) -> Arc<Connection> {
Arc::new(Connection {
node_id: connection.node_id,
node_address: connection.address,
is_inbound: is_inbound,
stream: connection.stream,
key: connection.key,
last_message_time: Mutex::new(time::Instant::now()),
})
}
pub fn is_inbound(&self) -> bool {
self.is_inbound
}
pub fn node_id(&self) -> &NodeId {
&self.node_id
}
pub fn last_message_time(&self) -> time::Instant {
*self.last_message_time.lock()
}
pub fn set_last_message_time(&self, last_message_time: time::Instant) {
*self.last_message_time.lock() = last_message_time;
}
pub fn node_address(&self) -> &SocketAddr {
&self.node_address
}
pub fn send_message(&self, message: Message) -> WriteMessage<SharedTcpStream> {
write_encrypted_message(self.stream.clone(), &self.key, message)
}
pub fn read_message(&self) -> ReadMessage<SharedTcpStream> {
read_encrypted_message(self.stream.clone(), self.key.clone())
}
}
impl ClusterView {
pub fn new(cluster: Arc<ClusterData>, nodes: BTreeSet<NodeId>) -> Self {
ClusterView {
core: Arc::new(Mutex::new(ClusterViewCore {
cluster: cluster,
nodes: nodes,
})),
}
}
}
impl Cluster for ClusterView {
fn broadcast(&self, message: Message) -> Result<(), Error> {
let core = self.core.lock();
for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) {
let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?;
core.cluster.spawn(connection.send_message(message.clone()))
}
Ok(())
}
fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> {
let core = self.core.lock();
let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?;
core.cluster.spawn(connection.send_message(message));
Ok(())
}
fn blacklist(&self, _node: &NodeId) {
// TODO: unimplemented!()
}
}
impl ClusterClientImpl {
pub fn new(data: Arc<ClusterData>) -> Self {
ClusterClientImpl {
data: data,
}
}
}
impl ClusterClient for ClusterClientImpl {
fn cluster_state(&self) -> ClusterState {
self.data.connections.cluster_state()
}
fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result<Arc<EncryptionSession>, Error> {
let mut connected_nodes = self.data.connections.connected_nodes();
connected_nodes.insert(self.data.self_key_pair.public().clone());
let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?;
session.initialize(threshold, connected_nodes)?;
Ok(session)
}
fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result<Arc<DecryptionSession>, Error> {
let mut connected_nodes = self.data.connections.connected_nodes();
connected_nodes.insert(self.data.self_key_pair.public().clone());
let access_key = Random.generate()?.secret().clone();
let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone()));
let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key, cluster)?;
session.initialize(requestor_signature)?;
Ok(session)
}
}
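// A minimal usage sketch of the client API above, assuming `client` wraps a running
// ClusterCore and that the returned decryption session exposes the blocking `wait()`
// of the decryption `Session` trait introduced later in this diff. Error handling is
// elided with `expect` for brevity.
fn decrypt_document(client: &ClusterClientImpl, document: SessionId, requestor: &ethkey::KeyPair) -> Public {
	// prove the requestor's identity: every node checks this signature against its ACL storage
	let signature = ethkey::sign(requestor.secret(), &document).expect("valid requestor secret");
	// an access key is generated internally and the distributed decryption protocol is run
	let session = client.new_decryption_session(document, signature).expect("enough connected nodes");
	// blocks until threshold + 1 partial decryptions have been combined into the secret
	session.wait().expect("requestor is allowed to read the document")
}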
fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
Ok(SocketAddr::new(ip_address, port))
}
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use std::sync::Arc;
use std::time;
use std::collections::VecDeque; use std::collections::VecDeque;
use parking_lot::Mutex; use parking_lot::Mutex;
use key_server_cluster::{NodeId, Error}; use tokio_core::reactor::Core;
use ethkey::{Random, Generator};
use key_server_cluster::{NodeId, Error, EncryptionConfiguration, DummyAclStorage, DummyKeyStorage};
use key_server_cluster::message::Message; use key_server_cluster::message::Message;
use key_server_cluster::cluster::Cluster; use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration};
#[derive(Debug)] #[derive(Debug)]
pub struct DummyCluster { pub struct DummyCluster {
@ -87,4 +946,61 @@ pub mod tests {
fn blacklist(&self, _node: &NodeId) { fn blacklist(&self, _node: &NodeId) {
} }
} }
pub fn loop_until<F>(core: &mut Core, timeout: time::Duration, predicate: F) where F: Fn() -> bool {
let start = time::Instant::now();
loop {
core.turn(Some(time::Duration::from_millis(1)));
if predicate() {
break;
}
if time::Instant::now() - start > timeout {
panic!("no result in {:?}", timeout);
}
}
}
pub fn all_connections_established(cluster: &Arc<ClusterCore>) -> bool {
cluster.config().nodes.keys()
.filter(|p| *p != cluster.config().self_key_pair.public())
.all(|p| cluster.connection(p).is_some())
}
pub fn make_clusters(core: &Core, ports_begin: u16, num_nodes: usize) -> Vec<Arc<ClusterCore>> {
let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect();
let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration {
threads: 1,
self_key_pair: key_pairs[i].clone(),
listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16),
nodes: key_pairs.iter().enumerate()
.map(|(j, kp)| (kp.public().clone(), ("127.0.0.1".into(), ports_begin + j as u16)))
.collect(),
allow_connecting_to_higher_nodes: false,
encryption_config: EncryptionConfiguration {
key_check_timeout_ms: 10,
},
key_storage: Arc::new(DummyKeyStorage::default()),
acl_storage: Arc::new(DummyAclStorage::default()),
}).collect();
let clusters: Vec<_> = cluster_params.into_iter().enumerate()
.map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap())
.collect();
clusters
}
pub fn run_clusters(clusters: &[Arc<ClusterCore>]) {
for cluster in clusters {
cluster.run().unwrap();
}
}
#[test]
fn cluster_connects_to_other_nodes() {
let mut core = Core::new().unwrap();
let clusters = make_clusters(&core, 6010, 3);
run_clusters(&clusters);
loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established));
}
} }


@ -14,15 +14,22 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::cmp::{Ord, PartialOrd, Ordering};
use std::collections::{BTreeSet, BTreeMap}; use std::collections::{BTreeSet, BTreeMap};
use std::sync::Arc; use std::sync::Arc;
use parking_lot::Mutex; use parking_lot::{Mutex, Condvar};
use ethkey::{self, Secret, Public, Signature}; use ethkey::{self, Secret, Public, Signature};
use key_server_cluster::{Error, AclStorage, EncryptedData, NodeId, SessionId}; use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId};
use key_server_cluster::cluster::Cluster; use key_server_cluster::cluster::Cluster;
use key_server_cluster::math; use key_server_cluster::math;
use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmDecryptionInitialization, use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecryptionSession, ConfirmDecryptionInitialization,
RequestPartialDecryption, PartialDecryption}; RequestPartialDecryption, PartialDecryption, DecryptionSessionError};
/// Decryption session API.
pub trait Session: Send + Sync + 'static {
/// Wait until the session is completed. Returns the distributedly restored secret key.
fn wait(&self) -> Result<Public, Error>;
}
/// Distributed decryption session. /// Distributed decryption session.
/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper:
@ -32,7 +39,7 @@ use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmD
/// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the document /// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the document
/// 3) partial decryption: every node which has successfully checked access for the requestor does a partial decryption /// 3) partial decryption: every node which has successfully checked access for the requestor does a partial decryption
/// 4) decryption: master node receives all partial decryptions of the secret and restores the secret /// 4) decryption: master node receives all partial decryptions of the secret and restores the secret
pub struct Session { pub struct SessionImpl {
/// Encryption session id. /// Encryption session id.
id: SessionId, id: SessionId,
/// Decryption session access key. /// Decryption session access key.
@ -40,25 +47,36 @@ pub struct Session {
/// Public identifier of this node. /// Public identifier of this node.
self_node_id: NodeId, self_node_id: NodeId,
/// Encrypted data. /// Encrypted data.
encrypted_data: EncryptedData, encrypted_data: DocumentKeyShare,
/// ACL storage to check access to the resource. /// ACL storage to check access to the resource.
acl_storage: Arc<AclStorage>, acl_storage: Arc<AclStorage>,
/// Cluster which allows this node to send messages to other nodes in the cluster. /// Cluster which allows this node to send messages to other nodes in the cluster.
cluster: Arc<Cluster>, cluster: Arc<Cluster>,
/// SessionImpl completion condvar.
completed: Condvar,
/// Mutable session data. /// Mutable session data.
data: Mutex<SessionData>, data: Mutex<SessionData>,
} }
/// Session creation parameters /// Decryption session Id.
pub struct SessionParams { #[derive(Debug, Clone, PartialEq, Eq)]
/// Session identifier. pub struct DecryptionSessionId {
/// Encryption session id.
pub id: SessionId, pub id: SessionId,
/// Session access key. /// Decryption session access key.
pub access_key: Secret,
}
/// SessionImpl creation parameters
pub struct SessionParams {
/// SessionImpl identifier.
pub id: SessionId,
/// SessionImpl access key.
pub access_key: Secret, pub access_key: Secret,
/// Id of node, on which this session is running. /// Id of node, on which this session is running.
pub self_node_id: Public, pub self_node_id: Public,
/// Encrypted data (result of running encryption_session::Session). /// Encrypted data (result of running encryption_session::SessionImpl).
pub encrypted_data: EncryptedData, pub encrypted_data: DocumentKeyShare,
/// ACL storage. /// ACL storage.
pub acl_storage: Arc<AclStorage>, pub acl_storage: Arc<AclStorage>,
/// Cluster /// Cluster
@ -91,16 +109,11 @@ struct SessionData {
/// === Values, filled during final decryption === /// === Values, filled during final decryption ===
/// Decrypted secret /// Decrypted secret
decrypted_secret: Option<Public>, decrypted_secret: Option<Result<Public, Error>>,
}
#[derive(Debug)]
struct NodeData {
/// Node-generated shadow point.
shadow_point: Option<Public>,
} }
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
/// Decryption session state.
pub enum SessionState { pub enum SessionState {
/// Every node starts in this state. /// Every node starts in this state.
WaitingForInitialization, WaitingForInitialization,
@ -116,18 +129,19 @@ pub enum SessionState {
Failed, Failed,
} }
impl Session { impl SessionImpl {
/// Create new decryption session. /// Create new decryption session.
pub fn new(params: SessionParams) -> Result<Self, Error> { pub fn new(params: SessionParams) -> Result<Self, Error> {
check_encrypted_data(&params.self_node_id, &params.encrypted_data)?; check_encrypted_data(&params.self_node_id, &params.encrypted_data)?;
Ok(Session { Ok(SessionImpl {
id: params.id, id: params.id,
access_key: params.access_key, access_key: params.access_key,
self_node_id: params.self_node_id, self_node_id: params.self_node_id,
encrypted_data: params.encrypted_data, encrypted_data: params.encrypted_data,
acl_storage: params.acl_storage, acl_storage: params.acl_storage,
cluster: params.cluster, cluster: params.cluster,
completed: Condvar::new(),
data: Mutex::new(SessionData { data: Mutex::new(SessionData {
state: SessionState::WaitingForInitialization, state: SessionState::WaitingForInitialization,
master: None, master: None,
@ -146,19 +160,22 @@ impl Session {
&self.self_node_id &self.self_node_id
} }
#[cfg(test)]
/// Get this session access key. /// Get this session access key.
pub fn access_key(&self) -> &Secret { pub fn access_key(&self) -> &Secret {
&self.access_key &self.access_key
} }
#[cfg(test)]
/// Get current session state. /// Get current session state.
pub fn state(&self) -> SessionState { pub fn state(&self) -> SessionState {
self.data.lock().state.clone() self.data.lock().state.clone()
} }
#[cfg(test)]
/// Get decrypted secret /// Get decrypted secret
pub fn decrypted_secret(&self) -> Option<Public> { pub fn decrypted_secret(&self) -> Option<Public> {
self.data.lock().decrypted_secret.clone() self.data.lock().decrypted_secret.clone().and_then(|r| r.ok())
} }
/// Initialize decryption session. /// Initialize decryption session.
@ -188,15 +205,20 @@ impl Session {
// not enough nodes => pass initialization message to all other nodes // not enough nodes => pass initialization message to all other nodes
SessionState::WaitingForInitializationConfirm => { SessionState::WaitingForInitializationConfirm => {
for node in self.encrypted_data.id_numbers.keys().filter(|n| *n != self.node()) { for node in self.encrypted_data.id_numbers.keys().filter(|n| *n != self.node()) {
self.cluster.send(node, Message::InitializeDecryptionSession(InitializeDecryptionSession { self.cluster.send(node, Message::Decryption(DecryptionMessage::InitializeDecryptionSession(InitializeDecryptionSession {
session: self.id.clone(), session: self.id.clone().into(),
sub_session: self.access_key.clone(), sub_session: self.access_key.clone().into(),
requestor_signature: requestor_signature.clone(), requestor_signature: requestor_signature.clone().into(),
}))?; })))?;
} }
}, },
// we can decrypt data on our own // we can decrypt data on our own
SessionState::WaitingForPartialDecryption => unimplemented!(), SessionState::WaitingForPartialDecryption => {
data.confirmed_nodes.insert(self.node().clone());
SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data)?;
SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?;
self.completed.notify_all();
},
// we can not decrypt data // we can not decrypt data
SessionState::Failed => (), SessionState::Failed => (),
// cannot reach other states // cannot reach other states
@ -207,9 +229,9 @@ impl Session {
} }
/// When session initialization message is received. /// When session initialization message is received.
pub fn on_initialize_session(&self, sender: NodeId, message: InitializeDecryptionSession) -> Result<(), Error> { pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeDecryptionSession) -> Result<(), Error> {
debug_assert!(self.id == message.session); debug_assert!(self.id == *message.session);
debug_assert!(self.access_key == message.sub_session); debug_assert!(self.access_key == *message.sub_session);
debug_assert!(&sender != self.node()); debug_assert!(&sender != self.node());
let mut data = self.data.lock(); let mut data = self.data.lock();
@ -230,17 +252,17 @@ impl Session {
// respond to master node // respond to master node
data.master = Some(sender.clone()); data.master = Some(sender.clone());
self.cluster.send(&sender, Message::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { self.cluster.send(&sender, Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization {
session: self.id.clone(), session: self.id.clone().into(),
sub_session: self.access_key.clone(), sub_session: self.access_key.clone().into(),
is_confirmed: is_requestor_allowed_to_read, is_confirmed: is_requestor_allowed_to_read,
})) })))
} }
/// When session initialization confirmation message is received. /// When session initialization confirmation message is received.
pub fn on_confirm_initialization(&self, sender: NodeId, message: ConfirmDecryptionInitialization) -> Result<(), Error> { pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmDecryptionInitialization) -> Result<(), Error> {
debug_assert!(self.id == message.session); debug_assert!(self.id == *message.session);
debug_assert!(self.access_key == message.sub_session); debug_assert!(self.access_key == *message.sub_session);
debug_assert!(&sender != self.node()); debug_assert!(&sender != self.node());
let mut data = self.data.lock(); let mut data = self.data.lock();
@ -260,26 +282,8 @@ impl Session {
// we do not yet have enough nodes for decryption // we do not yet have enough nodes for decryption
SessionState::WaitingForInitializationConfirm => Ok(()), SessionState::WaitingForInitializationConfirm => Ok(()),
// we have enough nodes for decryption // we have enough nodes for decryption
SessionState::WaitingForPartialDecryption => { SessionState::WaitingForPartialDecryption =>
let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data),
for node in data.confirmed_nodes.iter().filter(|n| n != &self.node()) {
self.cluster.send(node, Message::RequestPartialDecryption(RequestPartialDecryption {
session: self.id.clone(),
sub_session: self.access_key.clone(),
nodes: confirmed_nodes.clone(),
}))?;
}
assert!(data.confirmed_nodes.remove(self.node()));
let shadow_point = {
let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed");
do_partial_decryption(self.node(), &requestor, &data.confirmed_nodes, &self.access_key, &self.encrypted_data)?
};
data.shadow_points.insert(self.node().clone(), shadow_point);
Ok(())
},
// we can not have enough nodes for decryption // we can not have enough nodes for decryption
SessionState::Failed => Ok(()), SessionState::Failed => Ok(()),
// cannot reach other states // cannot reach other states
@ -288,9 +292,9 @@ impl Session {
} }
/// When partial decryption is requested. /// When partial decryption is requested.
pub fn on_partial_decryption_requested(&self, sender: NodeId, message: RequestPartialDecryption) -> Result<(), Error> { pub fn on_partial_decryption_requested(&self, sender: NodeId, message: &RequestPartialDecryption) -> Result<(), Error> {
debug_assert!(self.id == message.session); debug_assert!(self.id == *message.session);
debug_assert!(self.access_key == message.sub_session); debug_assert!(self.access_key == *message.sub_session);
debug_assert!(&sender != self.node()); debug_assert!(&sender != self.node());
// check message // check message
@ -311,13 +315,13 @@ impl Session {
// calculate shadow point // calculate shadow point
let shadow_point = { let shadow_point = {
let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed"); let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed");
do_partial_decryption(self.node(), &requestor, &message.nodes, &self.access_key, &self.encrypted_data)? do_partial_decryption(self.node(), &requestor, &message.nodes.iter().cloned().map(Into::into).collect(), &self.access_key, &self.encrypted_data)?
}; };
self.cluster.send(&sender, Message::PartialDecryption(PartialDecryption { self.cluster.send(&sender, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption {
session: self.id.clone(), session: self.id.clone().into(),
sub_session: self.access_key.clone(), sub_session: self.access_key.clone().into(),
shadow_point: shadow_point, shadow_point: shadow_point.into(),
}))?; })))?;
// update state // update state
data.state = SessionState::Finished; data.state = SessionState::Finished;
@ -326,9 +330,9 @@ impl Session {
} }
/// When partial decryption is received. /// When partial decryption is received.
pub fn on_partial_decryption(&self, sender: NodeId, message: PartialDecryption) -> Result<(), Error> { pub fn on_partial_decryption(&self, sender: NodeId, message: &PartialDecryption) -> Result<(), Error> {
debug_assert!(self.id == message.session); debug_assert!(self.id == *message.session);
debug_assert!(self.access_key == message.sub_session); debug_assert!(self.access_key == *message.sub_session);
debug_assert!(&sender != self.node()); debug_assert!(&sender != self.node());
let mut data = self.data.lock(); let mut data = self.data.lock();
@ -341,24 +345,113 @@ impl Session {
if !data.confirmed_nodes.remove(&sender) { if !data.confirmed_nodes.remove(&sender) {
return Err(Error::InvalidStateForRequest); return Err(Error::InvalidStateForRequest);
} }
data.shadow_points.insert(sender, message.shadow_point); data.shadow_points.insert(sender, message.shadow_point.clone().into());
// check if we have enough shadow points to decrypt the secret // check if we have enough shadow points to decrypt the secret
if data.shadow_points.len() != self.encrypted_data.threshold + 1 { if data.shadow_points.len() != self.encrypted_data.threshold + 1 {
return Ok(()); return Ok(());
} }
SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?;
self.completed.notify_all();
Ok(())
}
/// When an error has occurred on another node.
pub fn on_session_error(&self, sender: NodeId, message: &DecryptionSessionError) {
warn!("{}: decryption session error: {:?} from {}", self.node(), message, sender);
let mut data = self.data.lock();
data.state = SessionState::Failed;
data.decrypted_secret = Some(Err(Error::Io(message.error.clone())));
self.completed.notify_all();
}
/// When a session timeout has occurred.
pub fn on_session_timeout(&self, _node: &NodeId) {
warn!("{}: decryption session timeout", self.node());
let mut data = self.data.lock();
// TODO: check that node is a part of decryption process
data.state = SessionState::Failed;
data.decrypted_secret = Some(Err(Error::Io("session expired".into())));
self.completed.notify_all();
}
fn start_waiting_for_partial_decryption(self_node_id: NodeId, session_id: SessionId, access_key: Secret, cluster: &Arc<Cluster>, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> {
let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone();
for node in data.confirmed_nodes.iter().filter(|n| n != &&self_node_id) {
cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption {
session: session_id.clone().into(),
sub_session: access_key.clone().into(),
nodes: confirmed_nodes.iter().cloned().map(Into::into).collect(),
})))?;
}
assert!(data.confirmed_nodes.remove(&self_node_id));
let shadow_point = {
let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed");
do_partial_decryption(&self_node_id, &requestor, &data.confirmed_nodes, &access_key, &encrypted_data)?
};
data.shadow_points.insert(self_node_id.clone(), shadow_point);
Ok(())
}
fn do_decryption(access_key: Secret, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> {
// decrypt the secret using shadow points // decrypt the secret using shadow points
let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values())?; let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values())?;
let decrypted_secret = math::decrypt_with_joint_shadow(&self.access_key, &self.encrypted_data.encrypted_point, &joint_shadow_point)?; let decrypted_secret = math::decrypt_with_joint_shadow(encrypted_data.threshold, &access_key, &encrypted_data.encrypted_point, &joint_shadow_point)?;
data.decrypted_secret = Some(decrypted_secret); data.decrypted_secret = Some(Ok(decrypted_secret));
// switch to completed state
data.state = SessionState::Finished; data.state = SessionState::Finished;
Ok(()) Ok(())
} }
} }
fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) -> Result<(), Error> { impl Session for SessionImpl {
fn wait(&self) -> Result<Public, Error> {
let mut data = self.data.lock();
if data.decrypted_secret.is_none() {
self.completed.wait(&mut data);
}
data.decrypted_secret.as_ref()
.expect("checked above or waited for completed; completed is only signaled when decrypted_secret.is_some(); qed")
.clone()
}
}
impl DecryptionSessionId {
/// Create new decryption session Id.
pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self {
DecryptionSessionId {
id: session_id,
access_key: sub_session_id,
}
}
}
impl PartialOrd for DecryptionSessionId {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for DecryptionSessionId {
fn cmp(&self, other: &Self) -> Ordering {
match self.id.cmp(&other.id) {
Ordering::Equal => self.access_key.cmp(&other.access_key),
r @ _ => r,
}
}
}
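// Sketch of the intended use of the ordered id above; the exact container is an
// assumption for illustration. Several decryption sessions for the same document,
// distinguished by their access keys, can be kept in one ordered map.
fn insert_session(sessions: &mut BTreeMap<DecryptionSessionId, Arc<SessionImpl>>, id: SessionId, access_key: Secret, session: Arc<SessionImpl>) {
	sessions.insert(DecryptionSessionId::new(id, access_key), session);
}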
fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> {
use key_server_cluster::encryption_session::{check_cluster_nodes, check_threshold}; use key_server_cluster::encryption_session::{check_cluster_nodes, check_threshold};
let nodes = encrypted_data.id_numbers.keys().cloned().collect(); let nodes = encrypted_data.id_numbers.keys().cloned().collect();
@ -368,7 +461,7 @@ fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) -
Ok(()) Ok(())
} }
fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> {
if !data.requested_nodes.remove(node) { if !data.requested_nodes.remove(node) {
return Err(Error::InvalidMessage); return Err(Error::InvalidMessage);
} }
@ -395,7 +488,7 @@ fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut Se
Ok(()) Ok(())
} }
fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet<NodeId>, access_key: &Secret, encrypted_data: &EncryptedData) -> Result<Public, Error> { fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet<NodeId>, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result<Public, Error> {
let node_id_number = &encrypted_data.id_numbers[node]; let node_id_number = &encrypted_data.id_numbers[node];
let node_secret_share = &encrypted_data.secret_share; let node_secret_share = &encrypted_data.secret_share;
let other_id_numbers = participants.iter() let other_id_numbers = participants.iter()
@ -409,43 +502,42 @@ fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::sync::Arc; use std::sync::Arc;
use std::str::FromStr;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use super::super::super::acl_storage::DummyAclStorage; use super::super::super::acl_storage::DummyAclStorage;
use ethkey::{self, Random, Generator, Public, Secret}; use ethkey::{self, Random, Generator, Public, Secret};
use key_server_cluster::{NodeId, EncryptedData, SessionId, Error}; use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error};
use key_server_cluster::cluster::tests::DummyCluster; use key_server_cluster::cluster::tests::DummyCluster;
use key_server_cluster::decryption_session::{Session, SessionParams, SessionState}; use key_server_cluster::decryption_session::{SessionImpl, SessionParams, SessionState};
use key_server_cluster::message::{self, Message}; use key_server_cluster::message::{self, Message, DecryptionMessage};
const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f";
fn prepare_decryption_sessions() -> (Vec<Arc<DummyCluster>>, Vec<Arc<DummyAclStorage>>, Vec<Session>) { fn prepare_decryption_sessions() -> (Vec<Arc<DummyCluster>>, Vec<Arc<DummyAclStorage>>, Vec<SessionImpl>) {
// prepare encrypted data + cluster configuration for scheme 4-of-5 // prepare encrypted data + cluster configuration for scheme 4-of-5
let session_id = SessionId::default(); let session_id = SessionId::default();
let access_key = Random.generate().unwrap().secret().clone(); let access_key = Random.generate().unwrap().secret().clone();
let secret_shares = vec![ let secret_shares: Vec<Secret> = vec![
Secret::from_str("834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec").unwrap(), "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(),
Secret::from_str("5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b").unwrap(), "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b".parse().unwrap(),
Secret::from_str("71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9").unwrap(), "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9".parse().unwrap(),
Secret::from_str("80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4").unwrap(), "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4".parse().unwrap(),
Secret::from_str("c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad").unwrap(), "c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad".parse().unwrap(),
]; ];
let id_numbers: Vec<(NodeId, Secret)> = vec![ let id_numbers: Vec<(NodeId, Secret)> = vec![
("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), ("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(),
Secret::from_str("281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c").unwrap()), "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()),
("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(), ("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(),
Secret::from_str("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b").unwrap()), "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse().unwrap()),
("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), ("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(),
Secret::from_str("f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62").unwrap()), "f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62".parse().unwrap()),
("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), ("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(),
Secret::from_str("5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f").unwrap()), "5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f".parse().unwrap()),
("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(), ("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(),
Secret::from_str("12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8").unwrap()), "12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8".parse().unwrap()),
]; ];
let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into();
let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into();
let encrypted_datas: Vec<_> = (0..5).map(|i| EncryptedData { let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare {
threshold: 3, threshold: 3,
id_numbers: id_numbers.clone().into_iter().collect(), id_numbers: id_numbers.clone().into_iter().collect(),
secret_share: secret_shares[i].clone(), secret_share: secret_shares[i].clone(),
@ -454,7 +546,7 @@ mod tests {
}).collect(); }).collect();
let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect(); let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect();
let clusters: Vec<_> = (0..5).map(|i| Arc::new(DummyCluster::new(id_numbers.iter().nth(i).clone().unwrap().0))).collect(); let clusters: Vec<_> = (0..5).map(|i| Arc::new(DummyCluster::new(id_numbers.iter().nth(i).clone().unwrap().0))).collect();
let sessions: Vec<_> = (0..5).map(|i| Session::new(SessionParams { let sessions: Vec<_> = (0..5).map(|i| SessionImpl::new(SessionParams {
id: session_id.clone(), id: session_id.clone(),
access_key: access_key.clone(), access_key: access_key.clone(),
self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, self_node_id: id_numbers.iter().nth(i).clone().unwrap().0,
@ -466,11 +558,11 @@ mod tests {
(clusters, acl_storages, sessions) (clusters, acl_storages, sessions)
} }
fn do_messages_exchange(clusters: &[Arc<DummyCluster>], sessions: &[Session]) { fn do_messages_exchange(clusters: &[Arc<DummyCluster>], sessions: &[SessionImpl]) {
do_messages_exchange_until(clusters, sessions, |_, _, _| false); do_messages_exchange_until(clusters, sessions, |_, _, _| false);
} }
fn do_messages_exchange_until<F>(clusters: &[Arc<DummyCluster>], sessions: &[Session], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { fn do_messages_exchange_until<F>(clusters: &[Arc<DummyCluster>], sessions: &[SessionImpl], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool {
while let Some((from, to, message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() { while let Some((from, to, message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() {
let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()];
if cond(&from, &to, &message) { if cond(&from, &to, &message) {
@ -478,10 +570,10 @@ mod tests {
} }
match message { match message {
Message::InitializeDecryptionSession(message) => session.on_initialize_session(from, message).unwrap(), Message::Decryption(DecryptionMessage::InitializeDecryptionSession(message)) => session.on_initialize_session(from, &message).unwrap(),
Message::ConfirmDecryptionInitialization(message) => session.on_confirm_initialization(from, message).unwrap(), Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(message)) => session.on_confirm_initialization(from, &message).unwrap(),
Message::RequestPartialDecryption(message) => session.on_partial_decryption_requested(from, message).unwrap(), Message::Decryption(DecryptionMessage::RequestPartialDecryption(message)) => session.on_partial_decryption_requested(from, &message).unwrap(),
Message::PartialDecryption(message) => session.on_partial_decryption(from, message).unwrap(), Message::Decryption(DecryptionMessage::PartialDecryption(message)) => session.on_partial_decryption(from, &message).unwrap(),
_ => panic!("unexpected"), _ => panic!("unexpected"),
} }
} }
@ -492,11 +584,11 @@ mod tests {
let mut nodes = BTreeMap::new(); let mut nodes = BTreeMap::new();
let self_node_id = Random.generate().unwrap().public().clone(); let self_node_id = Random.generate().unwrap().public().clone();
nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); nodes.insert(self_node_id, Random.generate().unwrap().secret().clone());
match Session::new(SessionParams { match SessionImpl::new(SessionParams {
id: SessionId::default(), id: SessionId::default(),
access_key: Random.generate().unwrap().secret().clone(), access_key: Random.generate().unwrap().secret().clone(),
self_node_id: self_node_id.clone(), self_node_id: self_node_id.clone(),
encrypted_data: EncryptedData { encrypted_data: DocumentKeyShare {
threshold: 0, threshold: 0,
id_numbers: nodes, id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(), secret_share: Random.generate().unwrap().secret().clone(),
@ -517,11 +609,11 @@ mod tests {
let self_node_id = Random.generate().unwrap().public().clone(); let self_node_id = Random.generate().unwrap().public().clone();
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
match Session::new(SessionParams { match SessionImpl::new(SessionParams {
id: SessionId::default(), id: SessionId::default(),
access_key: Random.generate().unwrap().secret().clone(), access_key: Random.generate().unwrap().secret().clone(),
self_node_id: self_node_id.clone(), self_node_id: self_node_id.clone(),
encrypted_data: EncryptedData { encrypted_data: DocumentKeyShare {
threshold: 0, threshold: 0,
id_numbers: nodes, id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(), secret_share: Random.generate().unwrap().secret().clone(),
@ -542,11 +634,11 @@ mod tests {
let self_node_id = Random.generate().unwrap().public().clone(); let self_node_id = Random.generate().unwrap().public().clone();
nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone());
nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone());
match Session::new(SessionParams { match SessionImpl::new(SessionParams {
id: SessionId::default(), id: SessionId::default(),
access_key: Random.generate().unwrap().secret().clone(), access_key: Random.generate().unwrap().secret().clone(),
self_node_id: self_node_id.clone(), self_node_id: self_node_id.clone(),
encrypted_data: EncryptedData { encrypted_data: DocumentKeyShare {
threshold: 2, threshold: 2,
id_numbers: nodes, id_numbers: nodes,
secret_share: Random.generate().unwrap().secret().clone(), secret_share: Random.generate().unwrap().secret().clone(),
@ -572,70 +664,70 @@ mod tests {
fn fails_to_accept_initialization_when_already_initialized() { fn fails_to_accept_initialization_when_already_initialized() {
let (_, _, sessions) = prepare_decryption_sessions(); let (_, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ()); assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ());
assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), message::InitializeDecryptionSession { assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), &message::InitializeDecryptionSession {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
}).unwrap_err(), Error::InvalidStateForRequest); }).unwrap_err(), Error::InvalidStateForRequest);
} }
#[test] #[test]
fn fails_to_partial_decrypt_if_not_waiting() { fn fails_to_partial_decrypt_if_not_waiting() {
let (_, _, sessions) = prepare_decryption_sessions(); let (_, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
}).unwrap(), ()); }).unwrap(), ());
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(),
}).unwrap(), ()); }).unwrap(), ());
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(),
}).unwrap_err(), Error::InvalidStateForRequest); }).unwrap_err(), Error::InvalidStateForRequest);
} }
#[test] #[test]
fn fails_to_partial_decrypt_if_requested_by_slave() { fn fails_to_partial_decrypt_if_requested_by_slave() {
let (_, _, sessions) = prepare_decryption_sessions(); let (_, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
}).unwrap(), ()); }).unwrap(), ());
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), message::RequestPartialDecryption { assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), &message::RequestPartialDecryption {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(),
}).unwrap_err(), Error::InvalidMessage); }).unwrap_err(), Error::InvalidMessage);
} }
#[test] #[test]
fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() {
let (_, _, sessions) = prepare_decryption_sessions(); let (_, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(),
}).unwrap(), ()); }).unwrap(), ());
assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
nodes: sessions.iter().map(|s| s.node().clone()).take(2).collect(), nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(),
}).unwrap_err(), Error::InvalidMessage); }).unwrap_err(), Error::InvalidMessage);
} }
#[test] #[test]
fn fails_to_accept_partial_decrypt_if_not_waiting() { fn fails_to_accept_partial_decrypt_if_not_waiting() {
let (_, _, sessions) = prepare_decryption_sessions(); let (_, _, sessions) = prepare_decryption_sessions();
assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), message::PartialDecryption { assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), &message::PartialDecryption {
session: SessionId::default(), session: SessionId::default().into(),
sub_session: sessions[0].access_key().clone(), sub_session: sessions[0].access_key().clone().into(),
shadow_point: Random.generate().unwrap().public().clone(), shadow_point: Random.generate().unwrap().public().clone().into(),
}).unwrap_err(), Error::InvalidStateForRequest); }).unwrap_err(), Error::InvalidStateForRequest);
} }
@ -647,7 +739,7 @@ mod tests {
let mut pd_from = None; let mut pd_from = None;
let mut pd_msg = None; let mut pd_msg = None;
do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg {
&Message::PartialDecryption(ref msg) => { &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => {
pd_from = Some(from.clone()); pd_from = Some(from.clone());
pd_msg = Some(msg.clone()); pd_msg = Some(msg.clone());
true true
@ -655,8 +747,8 @@ mod tests {
_ => false, _ => false,
}); });
assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), pd_msg.clone().unwrap()).unwrap(), ()); assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ());
assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest);
} }
#[test] #[test]
@ -704,4 +796,9 @@ mod tests {
// 3) 0 sessions have decrypted key value // 3) 0 sessions have decrypted key value
assert!(sessions.iter().all(|s| s.decrypted_secret().is_none())); assert!(sessions.iter().all(|s| s.decrypted_secret().is_none()));
} }
#[test]
fn decryption_session_works_over_network() {
// TODO
}
} }

File diff suppressed because it is too large


@ -0,0 +1,85 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::time::Duration;
use futures::{Future, Select, BoxFuture, Poll, Async};
use tokio_core::reactor::{Handle, Timeout};
type DeadlineBox<F> where F: Future = BoxFuture<DeadlineStatus<F::Item>, F::Error>;
/// Complete a passed future or fail if it is not completed within timeout.
pub fn deadline<F, T>(duration: Duration, handle: &Handle, future: F) -> Result<Deadline<F>, io::Error>
where F: Future<Item = T, Error = io::Error> + Send + 'static, T: 'static {
let timeout = try!(Timeout::new(duration, handle)).map(|_| DeadlineStatus::Timeout).boxed();
let future = future.map(DeadlineStatus::Meet).boxed();
let deadline = Deadline {
future: timeout.select(future),
};
Ok(deadline)
}
#[derive(Debug, PartialEq)]
/// Deadline future completion status.
pub enum DeadlineStatus<T> {
/// Completed a future.
Meet(T),
/// Failed with timeout.
Timeout,
}
/// Future which waits for the passed future to complete within the given period, or fails with a timeout.
pub struct Deadline<F> where F: Future {
future: Select<DeadlineBox<F>, DeadlineBox<F>>,
}
impl<F, T> Future for Deadline<F> where F: Future<Item = T, Error = io::Error> {
type Item = DeadlineStatus<T>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.future.poll() {
Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err((err, _other)) => Err(err),
}
}
}
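// A small sketch of driving `deadline` to completion, assuming a tokio-core reactor
// `Core` is available (as in the tests below); the 5-second duration is arbitrary.
fn run_with_deadline<F, T>(core: &mut ::tokio_core::reactor::Core, future: F) -> Result<DeadlineStatus<T>, io::Error>
	where F: Future<Item = T, Error = io::Error> + Send + 'static, T: 'static {
	let timed = deadline(Duration::from_secs(5), &core.handle(), future)?;
	core.run(timed)
}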
#[cfg(test)]
mod tests {
use std::io;
use std::time::Duration;
use futures::{Future, empty, done};
use tokio_core::reactor::Core;
use super::{deadline, DeadlineStatus};
//#[test] TODO: not working
fn _deadline_timeout_works() {
let mut core = Core::new().unwrap();
let deadline = deadline(Duration::from_millis(1), &core.handle(), empty::<(), io::Error>()).unwrap();
core.turn(Some(Duration::from_millis(3)));
assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Timeout);
}
#[test]
fn deadline_result_works() {
let mut core = Core::new().unwrap();
let deadline = deadline(Duration::from_millis(1000), &core.handle(), done(Ok(()))).unwrap();
core.turn(Some(Duration::from_millis(3)));
assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(()));
}
}


@ -0,0 +1,320 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::collections::BTreeSet;
use futures::{Future, Poll, Async};
use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public};
use util::H256;
use key_server_cluster::{NodeId, Error};
use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature};
use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage,
read_message, compute_shared_key};
/// Start handshake procedure with another node from the cluster.
pub fn handshake<A>(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: io::Write + io::Read {
let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into);
handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes)
}
/// Start handshake procedure with another node from the cluster and given plain confirmation.
pub fn handshake_with_plain_confirmation<A>(a: A, self_confirmation_plain: Result<H256, Error>, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: io::Write + io::Read {
let (error, state) = match self_confirmation_plain.clone()
.and_then(|c| Handshake::<A>::make_public_key_message(self_key_pair.public().clone(), c)) {
Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))),
Err(err) => (Some((a, Err(err))), HandshakeState::Finished),
};
Handshake {
is_active: true,
error: error,
state: state,
self_key_pair: self_key_pair,
self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()),
trusted_nodes: trusted_nodes,
other_node_id: None,
other_confirmation_plain: None,
shared_key: None,
}
}
/// Wait for handshake procedure to be started by another node from the cluster.
pub fn accept_handshake<A>(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Handshake<A> where A: io::Write + io::Read {
let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into);
let (error, state) = match self_confirmation_plain.clone() {
Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))),
Err(err) => (Some((a, Err(err))), HandshakeState::Finished),
};
Handshake {
is_active: false,
error: error,
state: state,
self_key_pair: self_key_pair,
self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()),
trusted_nodes: trusted_nodes,
other_node_id: None,
other_confirmation_plain: None,
shared_key: None,
}
}
#[derive(Debug, PartialEq)]
/// Result of handshake procedure.
pub struct HandshakeResult {
/// Node id.
pub node_id: NodeId,
/// Shared key.
pub shared_key: Secret,
}
/// Future handshake procedure.
pub struct Handshake<A> {
is_active: bool,
error: Option<(A, Result<HandshakeResult, Error>)>,
state: HandshakeState<A>,
self_key_pair: KeyPair,
self_confirmation_plain: H256,
trusted_nodes: BTreeSet<NodeId>,
other_node_id: Option<NodeId>,
other_confirmation_plain: Option<H256>,
shared_key: Option<Secret>,
}
/// Active handshake state.
enum HandshakeState<A> {
SendPublicKey(WriteMessage<A>),
ReceivePublicKey(ReadMessage<A>),
SendPrivateKeySignature(WriteMessage<A>),
ReceivePrivateKeySignature(ReadMessage<A>),
Finished,
}
impl<A> Handshake<A> where A: io::Read + io::Write {
#[cfg(test)]
pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) {
self.self_confirmation_plain = self_confirmation_plain;
}
pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256) -> Result<Message, Error> {
Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
node_id: self_node_id.into(),
confirmation_plain: confirmation_plain.into(),
})))
}
fn make_private_key_signature_message(secret: &Secret, confirmation_plain: &H256) -> Result<Message, Error> {
Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
confirmation_signed: sign(secret, confirmation_plain)?.into(),
})))
}
}
impl<A> Future for Handshake<A> where A: io::Read + io::Write {
type Item = (A, Result<HandshakeResult, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(error_result) = self.error.take() {
return Ok(error_result.into());
}
let (next, result) = match self.state {
HandshakeState::SendPublicKey(ref mut future) => {
let (stream, _) = try_ready!(future.poll());
if self.is_active {
(HandshakeState::ReceivePublicKey(
read_message(stream)
), Async::NotReady)
} else {
self.shared_key = match compute_shared_key(self.self_key_pair.secret(),
self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed")
) {
Ok(shared_key) => Some(shared_key),
Err(err) => return Ok((stream, Err(err)).into()),
};
let message = match Handshake::<A>::make_private_key_signature_message(
self.self_key_pair.secret(),
self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed")
) {
Ok(message) => message,
Err(err) => return Ok((stream, Err(err)).into()),
};
(HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream,
self.shared_key.as_ref().expect("filled couple of lines above; qed"),
message)), Async::NotReady)
}
},
HandshakeState::ReceivePublicKey(ref mut future) => {
let (stream, message) = try_ready!(future.poll());
let message = match message {
Ok(message) => match message {
Message::Cluster(ClusterMessage::NodePublicKey(message)) => message,
_ => return Ok((stream, Err(Error::InvalidMessage)).into()),
},
Err(err) => return Ok((stream, Err(err.into())).into()),
};
if !self.trusted_nodes.contains(&*message.node_id) {
return Ok((stream, Err(Error::InvalidNodeId)).into());
}
self.other_node_id = Some(message.node_id.into());
self.other_confirmation_plain = Some(message.confirmation_plain.into());
if self.is_active {
self.shared_key = match compute_shared_key(self.self_key_pair.secret(),
self.other_node_id.as_ref().expect("filled couple of lines above; qed")
) {
Ok(shared_key) => Some(shared_key),
Err(err) => return Ok((stream, Err(err)).into()),
};
let message = match Handshake::<A>::make_private_key_signature_message(
self.self_key_pair.secret(),
self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed")
) {
Ok(message) => message,
Err(err) => return Ok((stream, Err(err)).into()),
};
(HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream,
self.shared_key.as_ref().expect("filled couple of lines above; qed"),
message)), Async::NotReady)
} else {
let message = match Handshake::<A>::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone()) {
Ok(message) => message,
Err(err) => return Ok((stream, Err(err)).into()),
};
(HandshakeState::SendPublicKey(write_message(stream, message)), Async::NotReady)
}
},
HandshakeState::SendPrivateKeySignature(ref mut future) => {
let (stream, _) = try_ready!(future.poll());
(HandshakeState::ReceivePrivateKeySignature(
read_message(stream)
), Async::NotReady)
},
HandshakeState::ReceivePrivateKeySignature(ref mut future) => {
let (stream, message) = try_ready!(future.poll());
let message = match message {
Ok(message) => match message {
Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => message,
_ => return Ok((stream, Err(Error::InvalidMessage)).into()),
},
Err(err) => return Ok((stream, Err(err.into())).into()),
};
let other_node_public = self.other_node_id.as_ref().expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed");
if !verify_public(other_node_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) {
return Ok((stream, Err(Error::InvalidMessage)).into());
}
(HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult {
node_id: self.other_node_id.expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"),
shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"),
}))))
},
HandshakeState::Finished => panic!("poll Handshake after it's done"),
};
self.state = next;
match result {
// by polling again, we register new future
Async::NotReady => self.poll(),
result => Ok(result)
}
}
}
#[cfg(test)]
mod tests {
use std::collections::BTreeSet;
use futures::Future;
use ethcrypto::ecdh::agree;
use ethkey::{Random, Generator, sign};
use util::H256;
use key_server_cluster::io::message::tests::TestIo;
use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature};
use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult};
fn prepare_test_io() -> (H256, TestIo) {
let self_key_pair = Random.generate().unwrap();
let peer_key_pair = Random.generate().unwrap();
let mut io = TestIo::new(self_key_pair.clone(), peer_key_pair.public().clone());
let self_confirmation_plain = *Random.generate().unwrap().secret().clone();
let peer_confirmation_plain = *Random.generate().unwrap().secret().clone();
let self_confirmation_signed = sign(peer_key_pair.secret(), &self_confirmation_plain).unwrap();
let peer_confirmation_signed = sign(self_key_pair.secret(), &peer_confirmation_plain).unwrap();
io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
node_id: peer_key_pair.public().clone().into(),
confirmation_plain: peer_confirmation_plain.into(),
})));
io.add_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
confirmation_signed: self_confirmation_signed.into(),
})));
io.add_output_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey {
node_id: self_key_pair.public().clone().into(),
confirmation_plain: self_confirmation_plain.clone().into(),
})));
io.add_output_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature {
confirmation_signed: peer_confirmation_signed.into(),
})));
(self_confirmation_plain, io)
}
#[test]
fn active_handshake_works() {
let (self_confirmation_plain, io) = prepare_test_io();
let self_key_pair = io.self_key_pair().clone();
let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect();
let shared_key = agree(self_key_pair.secret(), trusted_nodes.iter().nth(0).unwrap()).unwrap();
let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes);
let handshake_result = handshake.wait().unwrap();
assert_eq!(handshake_result.1, Ok(HandshakeResult {
node_id: handshake_result.0.peer_public().clone(),
shared_key: shared_key,
}));
handshake_result.0.assert_output();
}
#[test]
fn passive_handshake_works() {
let (self_confirmation_plain, io) = prepare_test_io();
let self_key_pair = io.self_key_pair().clone();
let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect();
let shared_key = agree(self_key_pair.secret(), io.peer_public()).unwrap();
let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes);
handshake.set_self_confirmation_plain(self_confirmation_plain);
let handshake_result = handshake.wait().unwrap();
assert_eq!(handshake_result.1, Ok(HandshakeResult {
node_id: handshake_result.0.peer_public().clone(),
shared_key: shared_key,
}));
handshake_result.0.assert_output();
}
}
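// Editor's sketch (not part of the original diff): driving the same handshake outside the test
// harness. `handshake` generates the confirmation plaintext internally and resolves with the
// stream plus the negotiated result; the generic bound and the `ethkey::KeyPair`/`NodeId` types
// are taken from how the `connect` helper uses this module and are assumptions of this sketch.
#[allow(dead_code)]
fn run_active_handshake<A: ::std::io::Read + ::std::io::Write>(
	stream: A,
	self_key_pair: ::ethkey::KeyPair,
	trusted_nodes: ::std::collections::BTreeSet<NodeId>,
) -> ::std::io::Result<(A, Result<HandshakeResult, Error>)> {
	// wait() polls the four-step exchange (swap public keys, then swap signed confirmations)
	// to completion; on a non-blocking socket the future would be spawned on an event loop instead.
	handshake(stream, self_key_pair, trusted_nodes).wait()
}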

View File

@ -0,0 +1,247 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io::Cursor;
use std::u16;
use std::ops::Deref;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use serde_json;
use ethcrypto::ecdh::agree;
use ethkey::{Public, Secret};
use key_server_cluster::Error;
use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage};
/// Size of serialized header.
pub const MESSAGE_HEADER_SIZE: usize = 4;
#[derive(Debug, PartialEq)]
/// Message header.
pub struct MessageHeader {
/// Message/Header version.
pub version: u8,
/// Message kind.
pub kind: u8,
/// Message payload size (without header).
pub size: u16,
}
#[derive(Debug, Clone, PartialEq)]
/// Serialized message.
pub struct SerializedMessage(Vec<u8>);
impl Deref for SerializedMessage {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.0
}
}
impl Into<Vec<u8>> for SerializedMessage {
fn into(self) -> Vec<u8> {
self.0
}
}
/// Serialize message.
pub fn serialize_message(message: Message) -> Result<SerializedMessage, Error> {
let (message_kind, payload) = match message {
Message::Cluster(ClusterMessage::NodePublicKey(payload)) => (1, serde_json::to_vec(&payload)),
Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => (2, serde_json::to_vec(&payload)),
Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)),
Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::Complaint(payload)) => (54, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::ComplaintResponse(payload)) => (55, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::PublicKeyShare(payload)) => (56, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::SessionError(payload)) => (57, serde_json::to_vec(&payload)),
Message::Encryption(EncryptionMessage::SessionCompleted(payload)) => (58, serde_json::to_vec(&payload)),
Message::Decryption(DecryptionMessage::InitializeDecryptionSession(payload)) => (100, serde_json::to_vec(&payload)),
Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)),
Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (102, serde_json::to_vec(&payload)),
Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (103, serde_json::to_vec(&payload)),
Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (104, serde_json::to_vec(&payload)),
};
let payload = payload.map_err(|err| Error::Serde(err.to_string()))?;
let payload_len = payload.len();
if payload_len > u16::MAX as usize {
return Err(Error::InvalidMessage);
}
let header = MessageHeader {
kind: message_kind,
version: 1,
size: payload_len as u16,
};
let mut serialized_message = serialize_header(&header)?;
serialized_message.extend(payload);
Ok(SerializedMessage(serialized_message))
}
/// Deserialize message.
pub fn deserialize_message(header: &MessageHeader, payload: Vec<u8>) -> Result<Message, Error> {
Ok(match header.kind {
1 => Message::Cluster(ClusterMessage::NodePublicKey(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
50 => Message::Encryption(EncryptionMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
51 => Message::Encryption(EncryptionMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
52 => Message::Encryption(EncryptionMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
53 => Message::Encryption(EncryptionMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
54 => Message::Encryption(EncryptionMessage::Complaint(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
55 => Message::Encryption(EncryptionMessage::ComplaintResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
56 => Message::Encryption(EncryptionMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
57 => Message::Encryption(EncryptionMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
58 => Message::Encryption(EncryptionMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
100 => Message::Decryption(DecryptionMessage::InitializeDecryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
101 => Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
102 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
103 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
104 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)),
_ => return Err(Error::Serde(format!("unknown message type {}", header.kind))),
})
}
/// Encrypt serialized message.
pub fn encrypt_message(_key: &Secret, message: SerializedMessage) -> Result<SerializedMessage, Error> {
Ok(message) // TODO: implement me
}
/// Decrypt serialized message.
pub fn decrypt_message(_key: &Secret, payload: Vec<u8>) -> Result<Vec<u8>, Error> {
Ok(payload) // TODO: implement me
}
/// Compute shared encryption key.
pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result<Secret, Error> {
Ok(agree(self_secret, other_public)?)
}
/// Serialize message header.
fn serialize_header(header: &MessageHeader) -> Result<Vec<u8>, Error> {
let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE);
buffer.write_u8(header.version)?;
buffer.write_u8(header.kind)?;
buffer.write_u16::<LittleEndian>(header.size)?;
Ok(buffer)
}
/// Deserialize message header.
pub fn deserialize_header(data: &[u8]) -> Result<MessageHeader, Error> {
let mut reader = Cursor::new(data);
Ok(MessageHeader {
version: reader.read_u8()?,
kind: reader.read_u8()?,
size: reader.read_u16::<LittleEndian>()?,
})
}
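// Editor's sketch (not part of the original diff): the intended round trip through the helpers
// above — serialize a message, split off the 4-byte header, then rebuild the message from the
// header and the remaining payload. KeepAlive is used because it carries no fields; its import
// is an assumption of this sketch.
#[allow(dead_code)]
fn serialization_round_trip_sketch() -> Result<(), Error> {
	use key_server_cluster::message::KeepAlive;
	let bytes: Vec<u8> = serialize_message(Message::Cluster(ClusterMessage::KeepAlive(KeepAlive {})))?.into();
	let header = deserialize_header(&bytes[..MESSAGE_HEADER_SIZE])?;
	debug_assert_eq!(header.size as usize, bytes.len() - MESSAGE_HEADER_SIZE);
	match deserialize_message(&header, bytes[MESSAGE_HEADER_SIZE..].to_vec())? {
		Message::Cluster(ClusterMessage::KeepAlive(_)) => Ok(()),
		_ => Err(Error::InvalidMessage),
	}
}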
#[cfg(test)]
pub mod tests {
use std::io;
use ethkey::{KeyPair, Public};
use key_server_cluster::message::Message;
use super::{MESSAGE_HEADER_SIZE, MessageHeader, serialize_message, serialize_header, deserialize_header};
pub struct TestIo {
self_key_pair: KeyPair,
peer_public: Public,
input_buffer: io::Cursor<Vec<u8>>,
output_buffer: Vec<u8>,
expected_output_buffer: Vec<u8>,
}
impl TestIo {
pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self {
TestIo {
self_key_pair: self_key_pair,
peer_public: peer_public,
input_buffer: io::Cursor::new(Vec::new()),
output_buffer: Vec::new(),
expected_output_buffer: Vec::new(),
}
}
pub fn self_key_pair(&self) -> &KeyPair {
&self.self_key_pair
}
pub fn peer_public(&self) -> &Public {
&self.peer_public
}
pub fn add_input_message(&mut self, message: Message) {
let serialized_message = serialize_message(message).unwrap();
let serialized_message: Vec<_> = serialized_message.into();
let input_buffer = self.input_buffer.get_mut();
for b in serialized_message {
input_buffer.push(b);
}
}
pub fn add_output_message(&mut self, message: Message) {
let serialized_message = serialize_message(message).unwrap();
let serialized_message: Vec<_> = serialized_message.into();
self.expected_output_buffer.extend(serialized_message);
}
pub fn assert_output(&self) {
assert_eq!(self.output_buffer, self.expected_output_buffer);
}
}
impl io::Read for TestIo {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
io::Read::read(&mut self.input_buffer, buf)
}
}
impl io::Write for TestIo {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
io::Write::write(&mut self.output_buffer, buf)
}
fn flush(&mut self) -> io::Result<()> {
io::Write::flush(&mut self.output_buffer)
}
}
#[test]
fn header_serialization_works() {
let header = MessageHeader {
kind: 1,
version: 2,
size: 3,
};
let serialized_header = serialize_header(&header).unwrap();
assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE);
let deserialized_header = deserialize_header(&serialized_header).unwrap();
assert_eq!(deserialized_header, header);
}
}

View File

@ -0,0 +1,34 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
mod deadline;
mod handshake;
mod message;
mod read_header;
mod read_payload;
mod read_message;
mod shared_tcp_stream;
mod write_message;
pub use self::deadline::{deadline, Deadline, DeadlineStatus};
pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult};
pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message,
encrypt_message, compute_shared_key};
pub use self::read_header::{read_header, ReadHeader};
pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload};
pub use self::read_message::{read_message, read_encrypted_message, ReadMessage};
pub use self::shared_tcp_stream::SharedTcpStream;
pub use self::write_message::{write_message, write_encrypted_message, WriteMessage};

View File

@ -0,0 +1,44 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Future, Poll, Async};
use tokio_core::io::{ReadExact, read_exact};
use key_server_cluster::Error;
use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header};
/// Create a future that reads a single message header from the stream.
pub fn read_header<A>(a: A) -> ReadHeader<A> where A: io::Read {
ReadHeader {
reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]),
}
}
/// Future that reads a single message header from the stream.
pub struct ReadHeader<A> {
reader: ReadExact<A, [u8; MESSAGE_HEADER_SIZE]>,
}
impl<A> Future for ReadHeader<A> where A: io::Read {
type Item = (A, Result<MessageHeader, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (read, data) = try_ready!(self.reader.poll());
let header = deserialize_header(&data);
Ok(Async::Ready((read, header)))
}
}
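// Editor's sketch (not part of the original diff): on an in-memory reader the future resolves
// immediately, so a hand-built 4-byte buffer (version, kind, little-endian size) is enough to
// see the header being decoded; kind 3 corresponds to KeepAlive in io::message.
#[allow(dead_code)]
fn read_header_sketch() -> ::std::io::Result<()> {
	let raw = ::std::io::Cursor::new(vec![1u8, 3u8, 0u8, 0u8]);
	let (_reader, header) = read_header(raw).wait()?;
	assert_eq!(header, Ok(MessageHeader { version: 1, kind: 3, size: 0 }));
	Ok(())
}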

View File

@ -0,0 +1,86 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Poll, Future, Async};
use ethkey::Secret;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload};
/// Create a future that reads a single message from the stream.
pub fn read_message<A>(a: A) -> ReadMessage<A> where A: io::Read {
ReadMessage {
key: None,
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
/// Create a future that reads a single encrypted message from the stream.
pub fn read_encrypted_message<A>(a: A, key: Secret) -> ReadMessage<A> where A: io::Read {
ReadMessage {
key: Some(key),
state: ReadMessageState::ReadHeader(read_header(a)),
}
}
enum ReadMessageState<A> {
ReadHeader(ReadHeader<A>),
ReadPayload(ReadPayload<A>),
Finished,
}
/// Future that reads a single message from the stream.
pub struct ReadMessage<A> {
key: Option<Secret>,
state: ReadMessageState<A>,
}
impl<A> Future for ReadMessage<A> where A: io::Read {
type Item = (A, Result<Message, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (next, result) = match self.state {
ReadMessageState::ReadHeader(ref mut future) => {
let (read, header) = try_ready!(future.poll());
let header = match header {
Ok(header) => header,
Err(err) => return Ok((read, Err(err)).into()),
};
let future = match self.key.take() {
Some(key) => read_encrypted_payload(read, header, key),
None => read_payload(read, header),
};
let next = ReadMessageState::ReadPayload(future);
(next, Async::NotReady)
},
ReadMessageState::ReadPayload(ref mut future) => {
let (read, payload) = try_ready!(future.poll());
(ReadMessageState::Finished, Async::Ready((read, payload)))
},
ReadMessageState::Finished => panic!("poll ReadMessage after it's done"),
};
self.state = next;
match result {
// by polling again, we register new future
Async::NotReady => self.poll(),
result => Ok(result)
}
}
}
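// Editor's sketch (not part of the original diff): reading a whole message back from an
// in-memory buffer produced by the serialization helpers, mirroring what read_message does on
// a real socket. The extra imports are assumptions of this sketch.
#[allow(dead_code)]
fn read_message_sketch() -> ::std::io::Result<()> {
	use key_server_cluster::io::serialize_message;
	use key_server_cluster::message::{ClusterMessage, KeepAlive};
	let bytes: Vec<u8> = serialize_message(Message::Cluster(ClusterMessage::KeepAlive(KeepAlive {})))
		.expect("KeepAlive serializes to a tiny JSON object; qed")
		.into();
	let (_reader, message) = read_message(::std::io::Cursor::new(bytes)).wait()?;
	match message {
		Ok(Message::Cluster(ClusterMessage::KeepAlive(_))) => Ok(()),
		_ => Err(::std::io::Error::new(::std::io::ErrorKind::Other, "unexpected message")),
	}
}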

View File

@ -0,0 +1,64 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Poll, Future};
use tokio_core::io::{read_exact, ReadExact};
use ethkey::Secret;
use key_server_cluster::Error;
use key_server_cluster::message::Message;
use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message};
/// Create a future that reads a single message payload from the stream.
pub fn read_payload<A>(a: A, header: MessageHeader) -> ReadPayload<A> where A: io::Read {
ReadPayload {
reader: read_exact(a, vec![0; header.size as usize]),
header: header,
key: None,
}
}
/// Create a future that reads a single encrypted message payload from the stream.
pub fn read_encrypted_payload<A>(a: A, header: MessageHeader, key: Secret) -> ReadPayload<A> where A: io::Read {
ReadPayload {
reader: read_exact(a, vec![0; header.size as usize]),
header: header,
key: Some(key),
}
}
/// Future that reads a single message payload from the stream.
pub struct ReadPayload<A> {
reader: ReadExact<A, Vec<u8>>,
header: MessageHeader,
key: Option<Secret>,
}
impl<A> Future for ReadPayload<A> where A: io::Read {
type Item = (A, Result<Message, Error>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (read, data) = try_ready!(self.reader.poll());
let payload = if let Some(key) = self.key.take() {
decrypt_message(&key, data)
.and_then(|data| deserialize_message(&self.header, data))
} else {
deserialize_message(&self.header, data)
};
Ok((read, payload).into())
}
}
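// Editor's sketch (not part of the original diff): ReadPayload trusts the header it is handed,
// so a hand-built header with kind 3 (KeepAlive) and the two-byte JSON payload `{}` exercises
// the plain (unencrypted) path against an in-memory reader.
#[allow(dead_code)]
fn read_payload_sketch() -> ::std::io::Result<()> {
	let header = MessageHeader { version: 1, kind: 3, size: 2 };
	let (_reader, message) = read_payload(::std::io::Cursor::new(b"{}".to_vec()), header).wait()?;
	assert!(message.is_ok());
	Ok(())
}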

View File

@ -0,0 +1,60 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::io::{Read, Write, Error};
use tokio_core::net::TcpStream;
/// Read+Write implementation for Arc<TcpStream>.
pub struct SharedTcpStream {
io: Arc<TcpStream>,
}
impl SharedTcpStream {
pub fn new(a: Arc<TcpStream>) -> Self {
SharedTcpStream {
io: a,
}
}
}
impl From<TcpStream> for SharedTcpStream {
fn from(a: TcpStream) -> Self {
SharedTcpStream::new(Arc::new(a))
}
}
impl Read for SharedTcpStream {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
Read::read(&mut (&*self.io as &TcpStream), buf)
}
}
impl Write for SharedTcpStream {
fn write(&mut self, buf: &[u8]) -> Result<usize, Error> {
Write::write(&mut (&*self.io as &TcpStream), buf)
}
fn flush(&mut self) -> Result<(), Error> {
Write::flush(&mut (&*self.io as &TcpStream))
}
}
impl Clone for SharedTcpStream {
fn clone(&self) -> Self {
SharedTcpStream::new(self.io.clone())
}
}
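// Editor's sketch (not part of the original diff): because the stream is just a cloneable
// handle over an Arc'd socket, the same connection can be handed to independent read and
// write futures; `tcp_stream` and `some_message` are placeholders.
//
// let stream: SharedTcpStream = tcp_stream.into();
// let read_future = read_message(stream.clone());
// let write_future = write_message(stream, some_message);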

View File

@ -0,0 +1,70 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::{Future, Poll};
use tokio_core::io::{WriteAll, write_all};
use ethkey::Secret;
use key_server_cluster::message::Message;
use key_server_cluster::io::{serialize_message, encrypt_message};
/// Write a plain message to the channel.
pub fn write_message<A>(a: A, message: Message) -> WriteMessage<A> where A: io::Write {
let (error, future) = match serialize_message(message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) {
Ok(message) => (None, write_all(a, message.into())),
Err(error) => (Some(error), write_all(a, Vec::new())),
};
WriteMessage {
error: error,
future: future,
}
}
/// Write an encrypted message to the channel.
pub fn write_encrypted_message<A>(a: A, key: &Secret, message: Message) -> WriteMessage<A> where A: io::Write {
let (error, future) = match serialize_message(message)
.and_then(|message| encrypt_message(key, message))
.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) {
Ok(message) => (None, write_all(a, message.into())),
Err(error) => (Some(error), write_all(a, Vec::new())),
};
WriteMessage {
error: error,
future: future,
}
}
/// Future that writes a single message to the channel.
pub struct WriteMessage<A> {
error: Option<io::Error>,
future: WriteAll<A, Vec<u8>>,
}
impl<A> Future for WriteMessage<A> where A: io::Write {
type Item = (A, Vec<u8>);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(err) = self.error.take() {
return Err(err);
}
self.future.poll()
}
}
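// Editor's sketch (not part of the original diff): Vec<u8> already implements io::Write, so a
// message can be "sent" into an in-memory sink and inspected; on success the future yields the
// sink together with the exact bytes that were written. The extra imports are assumptions.
#[allow(dead_code)]
fn write_message_sketch() -> ::std::io::Result<()> {
	use key_server_cluster::message::{ClusterMessage, KeepAlive};
	let message = Message::Cluster(ClusterMessage::KeepAlive(KeepAlive {}));
	let (sink, written) = write_message(Vec::new(), message).wait()?;
	assert_eq!(sink, written); // header first, then the JSON payload
	Ok(())
}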

View File

@ -160,7 +160,7 @@ pub fn compute_joint_secret<'a, I>(mut secret_coeffs: I) -> Result<Secret, Error
}
/// Encrypt secret with joint public key.
-pub fn encrypt_secret(secret: Public, joint_public: &Public) -> Result<EncryptedSecret, Error> {
+pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result<EncryptedSecret, Error> {
// this is performed by KS-cluster client (or KS master)
let key_pair = Random.generate()?;
@ -171,7 +171,7 @@ pub fn encrypt_secret(secret: Public, joint_public: &Public) -> Result<Encrypted
// M + k * y
let mut encrypted_point = joint_public.clone();
math::public_mul_secret(&mut encrypted_point, key_pair.secret())?;
-math::public_add(&mut encrypted_point, &secret)?;
+math::public_add(&mut encrypted_point, secret)?;
Ok(EncryptedSecret {
common_point: common_point,
@ -181,7 +181,11 @@ pub fn encrypt_secret(secret: Public, joint_public: &Public) -> Result<Encrypted
/// Compute shadow for the node.
pub fn compute_node_shadow<'a, I>(node_number: &Secret, node_secret_share: &Secret, mut other_nodes_numbers: I) -> Result<Secret, Error> where I: Iterator<Item=&'a Secret> {
-let other_node_number = other_nodes_numbers.next().expect("compute_node_shadow is called when at least two nodes are required to decrypt secret; qed");
+let other_node_number = match other_nodes_numbers.next() {
+Some(other_node_number) => other_node_number,
+None => return Ok(node_secret_share.clone()),
+};
let mut shadow = node_number.clone();
shadow.sub(other_node_number)?;
shadow.inv()?;
@ -231,17 +235,24 @@ pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point:
}
/// Decrypt data using joint shadow point.
-pub fn decrypt_with_joint_shadow(access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result<Public, Error> {
+pub fn decrypt_with_joint_shadow(threshold: usize, access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result<Public, Error> {
let mut inv_access_key = access_key.clone();
inv_access_key.inv()?;
-let mut decrypted_point = joint_shadow_point.clone();
-math::public_mul_secret(&mut decrypted_point, &inv_access_key)?;
-math::public_add(&mut decrypted_point, encrypted_point)?;
+let mut mul = joint_shadow_point.clone();
+math::public_mul_secret(&mut mul, &inv_access_key)?;
+let mut decrypted_point = encrypted_point.clone();
+if threshold % 2 != 0 {
+math::public_add(&mut decrypted_point, &mul)?;
+} else {
+math::public_sub(&mut decrypted_point, &mul)?;
+}
Ok(decrypted_point)
}
+#[cfg(test)]
/// Decrypt data using joint secret (version for tests).
pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result<Public, Error> {
let mut common_point_mul = common_point.clone();
@ -262,7 +273,7 @@ pub mod tests {
// === PART2: encryption using joint public key ===
// the next line is executed on KeyServer-client
-let encrypted_secret = encrypt_secret(document_secret_plain.clone(), &joint_public).unwrap();
+let encrypted_secret = encrypt_secret(&document_secret_plain, &joint_public).unwrap();
// === PART3: decryption ===
@ -285,7 +296,7 @@ pub mod tests {
assert_eq!(joint_shadow_point, joint_shadow_point_test);
// decrypt encrypted secret using joint shadow point
-let document_secret_decrypted = decrypt_with_joint_shadow(&access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap();
+let document_secret_decrypted = decrypt_with_joint_shadow(t, &access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap();
// decrypt encrypted secret using joint secret [just for test]
let document_secret_decrypted_test = match joint_secret {
@ -298,7 +309,8 @@ pub mod tests {
#[test]
fn full_encryption_math_session() {
-let test_cases = [(1, 3)];
+let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5),
+(1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)];
for &(t, n) in &test_cases {
// === PART1: DKG ===

View File

@ -14,13 +14,42 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+use std::fmt;
use std::collections::{BTreeSet, BTreeMap};
-use ethkey::{Public, Secret, Signature};
+use ethkey::Secret;
-use key_server_cluster::{NodeId, SessionId};
+use key_server_cluster::SessionId;
+use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature};
+pub type MessageSessionId = SerializableH256;
+pub type MessageNodeId = SerializablePublic;
#[derive(Clone, Debug)]
-/// All possible messages that can be sent during DKG.
+/// All possible messages that can be sent during encryption/decryption sessions.
pub enum Message {
+/// Cluster message.
+Cluster(ClusterMessage),
+/// Encryption message.
+Encryption(EncryptionMessage),
+/// Decryption message.
+Decryption(DecryptionMessage),
+}
+#[derive(Clone, Debug)]
+/// All possible cluster-level messages.
+pub enum ClusterMessage {
+/// Introduce node public key.
+NodePublicKey(NodePublicKey),
+/// Confirm that node owns its private key.
+NodePrivateKeySignature(NodePrivateKeySignature),
+/// Keep alive message.
+KeepAlive(KeepAlive),
+/// Keep alive message response.
+KeepAliveResponse(KeepAliveResponse),
+}
+#[derive(Clone, Debug)]
+/// All possible messages that can be sent during encryption session.
+pub enum EncryptionMessage {
/// Initialize new DKG session.
InitializeSession(InitializeSession),
/// Confirm DKG session initialization.
@ -35,7 +64,15 @@ pub enum Message {
ComplaintResponse(ComplaintResponse),
/// Broadcast self public key portion.
PublicKeyShare(PublicKeyShare),
+/// When session error has occurred.
+SessionError(SessionError),
+/// When session is completed.
+SessionCompleted(SessionCompleted),
+}
+#[derive(Clone, Debug)]
+/// All possible messages that can be sent during decryption session.
+pub enum DecryptionMessage {
/// Initialize decryption session.
InitializeDecryptionSession(InitializeDecryptionSession),
/// Confirm/reject decryption session initialization.
@ -44,125 +81,272 @@ pub enum Message {
RequestPartialDecryption(RequestPartialDecryption),
/// Partial decryption is completed
PartialDecryption(PartialDecryption),
+/// When decryption session error has occurred.
+DecryptionSessionError(DecryptionSessionError),
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// Introduce node public key.
+pub struct NodePublicKey {
+/// Node identifier (aka node public key).
+pub node_id: MessageNodeId,
+/// Data, which must be signed by peer to prove that he owns the corresponding private key.
+pub confirmation_plain: SerializableH256,
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// Confirm that node owns the private key of previously passed public key (aka node id).
+pub struct NodePrivateKeySignature {
+/// Previously passed `confirmation_plain`, signed with node private key.
+pub confirmation_signed: SerializableSignature,
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// Ask if the node is still alive.
+pub struct KeepAlive {
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// Confirm that the node is still alive.
+pub struct KeepAliveResponse {
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Initialize new DKG session.
pub struct InitializeSession {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Derived generation point. Starting from originator, every node must multiply this
/// point by random scalar (unknown by other nodes). At the end of initialization
/// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)`
/// is unknown for every node.
-pub derived_point: Public,
+pub derived_point: SerializablePublic,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Confirm DKG session initialization.
pub struct ConfirmInitialization {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Derived generation point.
-pub derived_point: Public,
+pub derived_point: SerializablePublic,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Broadcast generated point to every other node.
pub struct CompleteInitialization {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// All session participants along with their identification numbers.
-pub nodes: BTreeMap<NodeId, Secret>,
+pub nodes: BTreeMap<MessageNodeId, SerializableSecret>,
/// Decryption threshold. During decryption threshold-of-route.len() nodes must come to
/// consensus to successfully decrypt message.
pub threshold: usize,
/// Derived generation point.
-pub derived_point: Public,
+pub derived_point: SerializablePublic,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Generated keys are sent to every node.
pub struct KeysDissemination {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Secret 1.
-pub secret1: Secret,
+pub secret1: SerializableSecret,
/// Secret 2.
-pub secret2: Secret,
+pub secret2: SerializableSecret,
/// Public values.
-pub publics: Vec<Public>,
+pub publics: Vec<SerializablePublic>,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Complaint against node is broadcasted.
pub struct Complaint {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Public values.
-pub against: NodeId,
+pub against: MessageNodeId,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is responding to complaint.
pub struct ComplaintResponse {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Secret 1.
-pub secret1: Secret,
+pub secret1: SerializableSecret,
/// Secret 2.
-pub secret2: Secret,
+pub secret2: SerializableSecret,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is sharing its public key share.
pub struct PublicKeyShare {
/// Session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Public key share.
-pub public_share: Public,
+pub public_share: SerializablePublic,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// When session error has occurred.
+pub struct SessionError {
+/// Session Id.
+pub session: MessageSessionId,
+/// Error message.
+pub error: String,
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// When session is completed.
+pub struct SessionCompleted {
+/// Session Id.
+pub session: MessageSessionId,
+/// Common (shared) encryption point.
+pub common_point: SerializablePublic,
+/// Encrypted point.
+pub encrypted_point: SerializablePublic,
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is requested to decrypt data, encrypted in given session.
pub struct InitializeDecryptionSession {
/// Encryption session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Decryption session Id.
-pub sub_session: Secret,
+pub sub_session: SerializableSecret,
/// Requestor signature.
-pub requestor_signature: Signature,
+pub requestor_signature: SerializableSignature,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is responding to decryption request.
pub struct ConfirmDecryptionInitialization {
/// Encryption session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Decryption session Id.
-pub sub_session: Secret,
+pub sub_session: SerializableSecret,
/// Is node confirmed to make a decryption?
pub is_confirmed: bool,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node is requested to do a partial decryption.
pub struct RequestPartialDecryption {
/// Encryption session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Decryption session Id.
-pub sub_session: Secret,
+pub sub_session: SerializableSecret,
/// Nodes that have agreed to do a decryption.
-pub nodes: BTreeSet<NodeId>,
+pub nodes: BTreeSet<MessageNodeId>,
}
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
/// Node has partially decrypted the secret.
pub struct PartialDecryption {
/// Encryption session Id.
-pub session: SessionId,
+pub session: MessageSessionId,
/// Decryption session Id.
-pub sub_session: Secret,
+pub sub_session: SerializableSecret,
/// Partially decrypted secret.
-pub shadow_point: Public,
+pub shadow_point: SerializablePublic,
+}
+#[derive(Clone, Debug, Serialize, Deserialize)]
+/// When decryption session error has occurred.
+pub struct DecryptionSessionError {
+/// Encryption session Id.
+pub session: MessageSessionId,
+/// Decryption session Id.
+pub sub_session: SerializableSecret,
+/// Error message.
+pub error: String,
+}
+impl EncryptionMessage {
+pub fn session_id(&self) -> &SessionId {
+match *self {
+EncryptionMessage::InitializeSession(ref msg) => &msg.session,
+EncryptionMessage::ConfirmInitialization(ref msg) => &msg.session,
+EncryptionMessage::CompleteInitialization(ref msg) => &msg.session,
+EncryptionMessage::KeysDissemination(ref msg) => &msg.session,
+EncryptionMessage::Complaint(ref msg) => &msg.session,
+EncryptionMessage::ComplaintResponse(ref msg) => &msg.session,
+EncryptionMessage::PublicKeyShare(ref msg) => &msg.session,
+EncryptionMessage::SessionError(ref msg) => &msg.session,
+EncryptionMessage::SessionCompleted(ref msg) => &msg.session,
+}
+}
+}
+impl DecryptionMessage {
+pub fn session_id(&self) -> &SessionId {
+match *self {
+DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.session,
+DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.session,
+DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session,
+DecryptionMessage::PartialDecryption(ref msg) => &msg.session,
+DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session,
+}
+}
+pub fn sub_session_id(&self) -> &Secret {
+match *self {
+DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.sub_session,
+DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.sub_session,
+DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session,
+DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session,
+DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session,
+}
+}
+}
+impl fmt::Display for Message {
+fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+match *self {
+Message::Cluster(ref message) => write!(f, "Cluster.{}", message),
+Message::Encryption(ref message) => write!(f, "Encryption.{}", message),
+Message::Decryption(ref message) => write!(f, "Decryption.{}", message),
+}
+}
+}
+impl fmt::Display for ClusterMessage {
+fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+match *self {
+ClusterMessage::NodePublicKey(_) => write!(f, "NodePublicKey"),
+ClusterMessage::NodePrivateKeySignature(_) => write!(f, "NodePrivateKeySignature"),
+ClusterMessage::KeepAlive(_) => write!(f, "KeepAlive"),
+ClusterMessage::KeepAliveResponse(_) => write!(f, "KeepAliveResponse"),
+}
+}
+}
+impl fmt::Display for EncryptionMessage {
+fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+match *self {
+EncryptionMessage::InitializeSession(_) => write!(f, "InitializeSession"),
+EncryptionMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"),
+EncryptionMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"),
+EncryptionMessage::KeysDissemination(_) => write!(f, "KeysDissemination"),
+EncryptionMessage::Complaint(_) => write!(f, "Complaint"),
+EncryptionMessage::ComplaintResponse(_) => write!(f, "ComplaintResponse"),
+EncryptionMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"),
+EncryptionMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error),
+EncryptionMessage::SessionCompleted(_) => write!(f, "SessionCompleted"),
+}
+}
+}
+impl fmt::Display for DecryptionMessage {
+fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+match *self {
+DecryptionMessage::InitializeDecryptionSession(_) => write!(f, "InitializeDecryptionSession"),
+DecryptionMessage::ConfirmDecryptionInitialization(_) => write!(f, "ConfirmDecryptionInitialization"),
+DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"),
+DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"),
+DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"),
+}
+}
+}
}
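// Editor's sketch (not part of the diff above): the Display implementations give the cluster
// code terse, payload-free log lines; a keep-alive, for example, renders as "Cluster.KeepAlive".
#[allow(dead_code)]
fn message_display_sketch() {
	let message = Message::Cluster(ClusterMessage::KeepAlive(KeepAlive {}));
	assert_eq!(format!("{}", message), "Cluster.KeepAlive");
}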

View File

@ -14,21 +14,36 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-#![allow(dead_code)] // TODO: remove me
+use std::fmt;
+use std::io::Error as IoError;
-use std::collections::BTreeMap;
+use ethkey;
-use ethkey::{self, Public, Secret, Signature};
+use ethcrypto;
use super::types::all::DocumentAddress;
-pub use super::acl_storage::AclStorage;
+pub use super::types::all::{NodeId, EncryptionConfiguration};
+pub use super::acl_storage::{AclStorage, DummyAclStorage};
+pub use super::key_storage::{KeyStorage, DocumentKeyShare};
+pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic};
+pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient};
+pub use self::encryption_session::Session as EncryptionSession;
+pub use self::decryption_session::Session as DecryptionSession;
+#[cfg(test)]
+pub use super::key_storage::tests::DummyKeyStorage;
-pub type NodeId = Public;
pub type SessionId = DocumentAddress;
-pub type SessionIdSignature = Signature;
-#[derive(Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
/// Errors which can occur during encryption/decryption session
pub enum Error {
+/// Invalid node address has been passed.
+InvalidNodeAddress,
+/// Invalid node id has been passed.
+InvalidNodeId,
+/// Session with the given id already exists.
+DuplicateSessionId,
+/// Session with the given id is unknown.
+InvalidSessionId,
/// Invalid number of nodes.
/// There must be at least two nodes participating in encryption.
/// There must be at least one node participating in decryption.
@ -39,28 +54,24 @@ pub enum Error {
/// Threshold value must be in [0; n - 1], where n is a number of nodes participating in the encryption.
InvalidThreshold,
/// Current state of encryption/decryption session does not allow to proceed request.
+/// Reschedule this request for later processing.
+TooEarlyForRequest,
+/// Current state of encryption/decryption session does not allow to proceed request.
/// This means that either there is some comm-failure or node is misbehaving/cheating.
InvalidStateForRequest,
-/// Some data in passed message was recognized as invalid.
+/// Message or some data in the message was recognized as invalid.
/// This means that node is misbehaving/cheating.
InvalidMessage,
+/// Connection to node, required for this session is not established.
+NodeDisconnected,
/// Cryptographic error.
EthKey(String),
-}
-#[derive(Debug, Clone)]
-/// Data, which is stored on every node after DKG && encryption is completed.
-pub struct EncryptedData {
-/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
-threshold: usize,
-/// Nodes ids numbers.
-id_numbers: BTreeMap<NodeId, Secret>,
-/// Node secret share.
-secret_share: Secret,
-/// Common (shared) encryption point.
-common_point: Public,
-/// Encrypted point.
-encrypted_point: Public,
+/// I/O error has occurred.
+Io(String),
+/// Deserialization error has occurred.
+Serde(String),
+/// Key storage error.
+KeyStorage(String),
}
impl From<ethkey::Error> for Error {
@ -69,8 +80,50 @@ impl From<ethkey::Error> for Error {
}
}
+impl From<ethcrypto::Error> for Error {
+fn from(err: ethcrypto::Error) -> Self {
+Error::EthKey(err.into())
+}
+}
+impl From<IoError> for Error {
+fn from(err: IoError) -> Self {
+Error::Io(err.to_string())
+}
+}
+impl fmt::Display for Error {
+fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+match *self {
+Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"),
+Error::InvalidNodeId => write!(f, "invalid node id has been passed"),
+Error::DuplicateSessionId => write!(f, "session with the same id is already registered"),
+Error::InvalidSessionId => write!(f, "invalid session id has been passed"),
+Error::InvalidNodesCount => write!(f, "invalid nodes count"),
+Error::InvalidNodesConfiguration => write!(f, "invalid nodes configuration"),
+Error::InvalidThreshold => write!(f, "invalid threshold value has been passed"),
+Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"),
+Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"),
+Error::InvalidMessage => write!(f, "invalid message is received"),
+Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"),
+Error::EthKey(ref e) => write!(f, "cryptographic error {}", e),
+Error::Io(ref e) => write!(f, "i/o error {}", e),
+Error::Serde(ref e) => write!(f, "serde error {}", e),
+Error::KeyStorage(ref e) => write!(f, "key storage error {}", e),
+}
+}
+}
+impl Into<String> for Error {
+fn into(self) -> String {
+format!("{}", self)
+}
+}
mod cluster;
mod decryption_session;
mod encryption_session;
+mod io;
mod math;
mod message;
+mod net;

View File

@ -0,0 +1,63 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use std::net::SocketAddr;
use std::time::Duration;
use std::collections::BTreeSet;
use futures::{Future, Poll};
use tokio_core::reactor::Handle;
use tokio_core::net::TcpStream;
use ethkey::KeyPair;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;
/// Create a future that accepts an incoming connection.
pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Deadline<AcceptConnection> {
let accept = AcceptConnection {
handshake: accept_handshake(stream, self_key_pair, trusted_nodes),
address: address,
};
deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout")
}
/// Future that accepts an incoming connection.
pub struct AcceptConnection {
handshake: Handshake<TcpStream>,
address: SocketAddr,
}
impl Future for AcceptConnection {
type Item = Result<Connection, Error>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (stream, result) = try_ready!(self.handshake.poll());
let result = match result {
Ok(result) => result,
Err(err) => return Ok(Err(err).into()),
};
let connection = Connection {
stream: stream.into(),
address: self.address,
node_id: result.node_id,
key: result.shared_key,
};
Ok(Ok(connection).into())
}
}

View File

@ -0,0 +1,90 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeSet;
use std::io;
use std::time::Duration;
use std::net::SocketAddr;
use futures::{Future, Poll, Async};
use tokio_core::reactor::Handle;
use tokio_core::net::{TcpStream, TcpStreamNew};
use ethkey::KeyPair;
use key_server_cluster::{Error, NodeId};
use key_server_cluster::io::{handshake, Handshake, Deadline, deadline};
use key_server_cluster::net::Connection;
/// Create a future that connects to another node.
pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet<NodeId>) -> Deadline<Connect> {
let connect = Connect {
state: ConnectState::TcpConnect(TcpStream::connect(address, handle)),
address: address.clone(),
self_key_pair: self_key_pair,
trusted_nodes: trusted_nodes,
};
deadline(Duration::new(5, 0), handle, connect).expect("Failed to create timeout")
}
enum ConnectState {
TcpConnect(TcpStreamNew),
Handshake(Handshake<TcpStream>),
Connected,
}
/// Future that connects to another node.
pub struct Connect {
state: ConnectState,
address: SocketAddr,
self_key_pair: KeyPair,
trusted_nodes: BTreeSet<NodeId>,
}
impl Future for Connect {
type Item = Result<Connection, Error>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let (next, result) = match self.state {
ConnectState::TcpConnect(ref mut future) => {
let stream = try_ready!(future.poll());
let handshake = handshake(stream, self.self_key_pair.clone(), self.trusted_nodes.clone());
(ConnectState::Handshake(handshake), Async::NotReady)
},
ConnectState::Handshake(ref mut future) => {
let (stream, result) = try_ready!(future.poll());
let result = match result {
Ok(result) => result,
Err(err) => return Ok(Async::Ready(Err(err))),
};
let connection = Connection {
stream: stream.into(),
address: self.address,
node_id: result.node_id,
key: result.shared_key,
};
(ConnectState::Connected, Async::Ready(Ok(connection)))
},
ConnectState::Connected => panic!("poll Connect after it's done"),
};
self.state = next;
match result {
// by polling again, we register new future
Async::NotReady => self.poll(),
result => Ok(result)
}
}
}
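// Editor's sketch (not part of the original diff): resolving a Connection with the helper above
// needs a tokio_core event loop; the Deadline wrapper bounds TCP connect plus handshake to five
// seconds. The exact deadline/status handling depends on the deadline module, so the snippet is
// only indicative and `address`, `self_key_pair` and `trusted_nodes` are assumed to exist.
//
// let mut core = tokio_core::reactor::Core::new()?;
// let connect_future = connect(&address, &core.handle(), self_key_pair, trusted_nodes);
// let status = core.run(connect_future)?; // deadline status wrapping Result<Connection, Error>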

View File

@ -0,0 +1,32 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::net;
use ethkey::Secret;
use key_server_cluster::NodeId;
use key_server_cluster::io::SharedTcpStream;
/// Established connection data
pub struct Connection {
/// Peer address.
pub address: net::SocketAddr,
/// Connection stream.
pub stream: SharedTcpStream,
/// Peer node id.
pub node_id: NodeId,
/// Encryption key.
pub key: Secret,
}

View File

@ -0,0 +1,23 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
mod accept_connection;
mod connect;
mod connection;
pub use self::accept_connection::{AcceptConnection, accept_connection};
pub use self::connect::{Connect, connect};
pub use self::connection::Connection;
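
A rough usage sketch (not part of this commit; the module paths, the empty trusted-node set and the coarse error handling are assumptions) of how a caller holding a tokio-core reactor might dial a peer through the `connect` future re-exported above:

use std::collections::BTreeSet;
use std::net::SocketAddr;
use tokio_core::reactor::Core;
use ethkey::{Random, Generator};
use key_server_cluster::net::connect;

// Dial a single peer and block until the wrapped handshake future resolves or
// its 5-second deadline fires.
fn dial(peer: &SocketAddr) {
	let mut core = Core::new().expect("failed to create reactor");
	let handle = core.handle();
	let self_key_pair = Random.generate().expect("failed to generate key pair");
	let trusted_nodes = BTreeSet::new(); // normally taken from ClusterConfiguration::nodes
	match core.run(connect(peer, &handle, self_key_pair, trusted_nodes)) {
		Ok(_) => println!("connected to {}", peer),
		Err(_) => println!("connection to {} failed or timed out", peer),
	}
}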

View File

@ -15,15 +15,34 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::path::PathBuf;
use std::collections::BTreeMap;
use serde_json;
use ethkey::{Secret, Public};
use util::Database;
use types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey}; use types::all::{Error, ServiceConfiguration, DocumentAddress, NodeId};
use serialization::{SerializablePublic, SerializableSecret};
#[derive(Debug, Clone, PartialEq)]
/// Encrypted key share, stored by key storage on the single key server.
pub struct DocumentKeyShare {
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
pub threshold: usize,
/// Nodes ids numbers.
pub id_numbers: BTreeMap<NodeId, Secret>,
/// Node secret share.
pub secret_share: Secret,
/// Common (shared) encryption point.
pub common_point: Public,
/// Encrypted point.
pub encrypted_point: Public,
}
/// Document encryption keys storage
pub trait KeyStorage: Send + Sync {
/// Insert document encryption key
fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error>; fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>;
/// Get document encryption key
fn get(&self, document: &DocumentAddress) -> Result<DocumentKey, Error>; fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error>;
}
/// Persistent document encryption keys storage
@ -31,6 +50,21 @@ pub struct PersistentKeyStorage {
db: Database,
}
#[derive(Serialize, Deserialize)]
/// Encrypted key share, as it is stored by key storage on the single key server.
struct SerializableDocumentKeyShare {
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
pub threshold: usize,
/// Nodes ids numbers.
pub id_numbers: BTreeMap<SerializablePublic, SerializableSecret>,
/// Node secret share.
pub secret_share: SerializableSecret,
/// Common (shared) encryption point.
pub common_point: SerializablePublic,
/// Encrypted point.
pub encrypted_point: SerializablePublic,
}
impl PersistentKeyStorage {
/// Create new persistent document encryption keys storage
pub fn new(config: &ServiceConfiguration) -> Result<Self, Error> {
@ -45,41 +79,71 @@ impl PersistentKeyStorage {
}
impl KeyStorage for PersistentKeyStorage {
fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> { fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> {
let key: SerializableDocumentKeyShare = key.into();
let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?;
let mut batch = self.db.transaction();
batch.put(None, &document, &key);
self.db.write(batch).map_err(Error::Database)
}
fn get(&self, document: &DocumentAddress) -> Result<DocumentKey, Error> { fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
self.db.get(None, document)
.map_err(Error::Database)?
.ok_or(Error::DocumentNotFound)
.map(|key| key.to_vec())
.and_then(|key| serde_json::from_slice::<SerializableDocumentKeyShare>(&key).map_err(|e| Error::Database(e.to_string())))
.map(Into::into)
}
}
impl From<DocumentKeyShare> for SerializableDocumentKeyShare {
fn from(key: DocumentKeyShare) -> Self {
SerializableDocumentKeyShare {
threshold: key.threshold,
id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
secret_share: key.secret_share.into(),
common_point: key.common_point.into(),
encrypted_point: key.encrypted_point.into(),
}
}
}
impl From<SerializableDocumentKeyShare> for DocumentKeyShare {
fn from(key: SerializableDocumentKeyShare) -> Self {
DocumentKeyShare {
threshold: key.threshold,
id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
secret_share: key.secret_share.into(),
common_point: key.common_point.into(),
encrypted_point: key.encrypted_point.into(),
}
}
}
#[cfg(test)]
pub mod tests {
use std::collections::HashMap; use std::collections::{BTreeMap, HashMap};
use parking_lot::RwLock;
use devtools::RandomTempPath; use devtools::RandomTempPath;
use super::super::types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey}; use ethkey::{Random, Generator};
use super::{KeyStorage, PersistentKeyStorage}; use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration,
DocumentAddress, EncryptionConfiguration};
use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare};
#[derive(Default)]
/// In-memory document encryption keys storage
pub struct DummyKeyStorage {
keys: RwLock<HashMap<DocumentAddress, DocumentKey>>, keys: RwLock<HashMap<DocumentAddress, DocumentKeyShare>>,
}
impl KeyStorage for DummyKeyStorage {
fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> { fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> {
self.keys.write().insert(document, key);
Ok(())
}
fn get(&self, document: &DocumentAddress) -> Result<DocumentKey, Error> { fn get(&self, document: &DocumentAddress) -> Result<DocumentKeyShare, Error> {
self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound)
}
}
@ -88,15 +152,46 @@ pub mod tests {
fn persistent_key_storage() {
let path = RandomTempPath::create_dir();
let config = ServiceConfiguration {
listener_addr: "0.0.0.0".to_owned(), listener_address: NodeAddress {
listener_port: 8082, address: "0.0.0.0".to_owned(),
port: 8082,
},
data_path: path.as_str().to_owned(),
cluster_config: ClusterConfiguration {
threads: 1,
self_private: (**Random.generate().unwrap().secret().clone()).into(),
listener_address: NodeAddress {
address: "0.0.0.0".to_owned(),
port: 8083,
},
nodes: BTreeMap::new(),
allow_connecting_to_higher_nodes: false,
encryption_config: EncryptionConfiguration {
key_check_timeout_ms: 10,
},
},
};
let key1 = DocumentAddress::from(1);
let value1: DocumentKey = vec![0x77, 0x88]; let value1 = DocumentKeyShare {
threshold: 100,
id_numbers: vec![
(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
].into_iter().collect(),
secret_share: Random.generate().unwrap().secret().clone(),
common_point: Random.generate().unwrap().public().clone(),
encrypted_point: Random.generate().unwrap().public().clone(),
};
let key2 = DocumentAddress::from(2);
let value2: DocumentKey = vec![0x11, 0x22]; let value2 = DocumentKeyShare {
threshold: 200,
id_numbers: vec![
(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
].into_iter().collect(),
secret_share: Random.generate().unwrap().secret().clone(),
common_point: Random.generate().unwrap().public().clone(),
encrypted_point: Random.generate().unwrap().public().clone(),
};
let key3 = DocumentAddress::from(3);
let key_storage = PersistentKeyStorage::new(&config).unwrap();
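
A minimal sketch (hypothetical, mirroring the test above) of the insert/get round trip through the in-memory DummyKeyStorage with the new DocumentKeyShare type; the field values are placeholders:

// Hypothetical test living next to the ones above; only the shape of
// DocumentKeyShare matters, the values are random placeholders.
#[test]
fn dummy_key_storage_round_trip() {
	use ethkey::{Random, Generator};

	let storage = DummyKeyStorage::default();
	let document = DocumentAddress::from(42);
	let share = DocumentKeyShare {
		threshold: 1,
		id_numbers: vec![
			(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone())
		].into_iter().collect(),
		secret_share: Random.generate().unwrap().secret().clone(),
		common_point: Random.generate().unwrap().public().clone(),
		encrypted_point: Random.generate().unwrap().public().clone(),
	};
	storage.insert(document.clone(), share.clone()).unwrap();
	assert_eq!(storage.get(&document).unwrap(), share);
}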

View File

@ -14,10 +14,22 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate byteorder;
#[macro_use]
extern crate log;
#[macro_use]
extern crate futures;
extern crate futures_cpupool;
extern crate hyper;
extern crate parking_lot;
extern crate rustc_serialize;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
extern crate tokio_core;
extern crate tokio_service;
extern crate tokio_proto;
extern crate url;
extern crate ethcore_devtools as devtools;
@ -38,16 +50,19 @@ mod acl_storage;
mod http_listener;
mod key_server;
mod key_storage;
mod serialization;
pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public,
Error, ServiceConfiguration}; Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, EncryptionConfiguration};
pub use traits::{KeyServer};
/// Start new key server instance
pub fn start(config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
let acl_storage = acl_storage::DummyAclStorage::default(); use std::sync::Arc;
let key_storage = key_storage::PersistentKeyStorage::new(&config)?;
let key_server = key_server::KeyServerImpl::new(acl_storage, key_storage); let acl_storage = Arc::new(acl_storage::DummyAclStorage::default());
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?;
let listener = http_listener::KeyServerHttpListener::start(config, key_server)?;
Ok(Box::new(listener))
}
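
As an illustration only (the construction of `config`, `signature` and `document` is up to the caller and is not shown in this diff; the names below refer to the crate-root re-exports listed above), the crate is now driven roughly like this:

// Hypothetical caller of start(); generate_document_key comes from the KeyServer
// trait extended later in this commit, document_key is the pre-existing path.
fn serve_one_key(config: ServiceConfiguration, signature: RequestSignature, document: DocumentAddress) -> Result<(), Error> {
	// start() wires the ACL storage, the persistent key storage and the
	// cluster-backed key server together behind the HTTP listener.
	let key_server = start(config)?;
	// Ask the cluster to generate a key for `document` with threshold 1.
	let _encrypted = key_server.generate_document_key(&signature, &document, 1)?;
	// Retrieval is served by the same trait object.
	let _retrieved = key_server.document_key(&signature, &document)?;
	Ok(())
}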

View File

@ -0,0 +1,260 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::cmp::{Ord, PartialOrd, Ordering};
use std::ops::Deref;
use rustc_serialize::hex::ToHex;
use serde::{Serialize, Deserialize, Serializer, Deserializer};
use serde::de::{Visitor, Error as SerdeError};
use ethkey::{Public, Secret, Signature};
use util::H256;
#[derive(Clone, Debug)]
/// Serializable Signature.
pub struct SerializableSignature(Signature);
impl<T> From<T> for SerializableSignature where Signature: From<T> {
fn from(s: T) -> SerializableSignature {
SerializableSignature(s.into())
}
}
impl Into<Signature> for SerializableSignature {
fn into(self) -> Signature {
self.0
}
}
impl Deref for SerializableSignature {
type Target = Signature;
fn deref(&self) -> &Signature {
&self.0
}
}
impl Serialize for SerializableSignature {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
serializer.serialize_str(&(*self.0).to_hex())
}
}
impl Deserialize for SerializableSignature {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
struct HashVisitor;
impl Visitor for HashVisitor {
type Value = SerializableSignature;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a hex-encoded Signature")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
value.parse().map(|s| SerializableSignature(s)).map_err(SerdeError::custom)
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
}
deserializer.deserialize(HashVisitor)
}
}
#[derive(Clone, Debug)]
/// Serializable H256.
pub struct SerializableH256(H256);
impl<T> From<T> for SerializableH256 where H256: From<T> {
fn from(s: T) -> SerializableH256 {
SerializableH256(s.into())
}
}
impl Into<H256> for SerializableH256 {
fn into(self) -> H256 {
self.0
}
}
impl Deref for SerializableH256 {
type Target = H256;
fn deref(&self) -> &H256 {
&self.0
}
}
impl Serialize for SerializableH256 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
serializer.serialize_str(&(*self.0).to_hex())
}
}
impl Deserialize for SerializableH256 {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
struct HashVisitor;
impl Visitor for HashVisitor {
type Value = SerializableH256;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a hex-encoded H256")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
value.parse().map(|s| SerializableH256(s)).map_err(SerdeError::custom)
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
}
deserializer.deserialize(HashVisitor)
}
}
#[derive(Clone, Debug)]
/// Serializable EC scalar/secret key.
pub struct SerializableSecret(Secret);
impl<T> From<T> for SerializableSecret where Secret: From<T> {
fn from(s: T) -> SerializableSecret {
SerializableSecret(s.into())
}
}
impl Into<Secret> for SerializableSecret {
fn into(self) -> Secret {
self.0
}
}
impl Deref for SerializableSecret {
type Target = Secret;
fn deref(&self) -> &Secret {
&self.0
}
}
impl Serialize for SerializableSecret {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
serializer.serialize_str(&(*self.0).to_hex())
}
}
impl Deserialize for SerializableSecret {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
struct HashVisitor;
impl Visitor for HashVisitor {
type Value = SerializableSecret;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a hex-encoded EC scalar")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
value.parse().map(|s| SerializableSecret(s)).map_err(SerdeError::custom)
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
}
deserializer.deserialize(HashVisitor)
}
}
#[derive(Clone, Debug)]
/// Serializable EC point/public key.
pub struct SerializablePublic(Public);
impl<T> From<T> for SerializablePublic where Public: From<T> {
fn from(p: T) -> SerializablePublic {
SerializablePublic(p.into())
}
}
impl Into<Public> for SerializablePublic {
fn into(self) -> Public {
self.0
}
}
impl Deref for SerializablePublic {
type Target = Public;
fn deref(&self) -> &Public {
&self.0
}
}
impl Eq for SerializablePublic { }
impl PartialEq for SerializablePublic {
fn eq(&self, other: &SerializablePublic) -> bool {
self.0.eq(&other.0)
}
}
impl Ord for SerializablePublic {
fn cmp(&self, other: &SerializablePublic) -> Ordering {
self.0.cmp(&other.0)
}
}
impl PartialOrd for SerializablePublic {
fn partial_cmp(&self, other: &SerializablePublic) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl Serialize for SerializablePublic {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
serializer.serialize_str(&(*self.0).to_hex())
}
}
impl Deserialize for SerializablePublic {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
struct HashVisitor;
impl Visitor for HashVisitor {
type Value = SerializablePublic;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a hex-encoded EC point")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: SerdeError {
value.parse().map(|s| SerializablePublic(s)).map_err(SerdeError::custom)
}
fn visit_string<E>(self, value: String) -> Result<Self::Value, E> where E: SerdeError {
self.visit_str(value.as_ref())
}
}
deserializer.deserialize(HashVisitor)
}
}
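
A quick round-trip sketch (hypothetical, assuming serde_json and ethkey's Random generator are available in scope) showing that the wrappers encode as bare hex JSON strings without a 0x prefix:

// Hypothetical check: SerializablePublic serializes to a quoted hex string and
// parses back into the same point; the other wrappers behave the same way.
#[test]
fn serializable_public_round_trip() {
	use ethkey::{Random, Generator};

	let public = Random.generate().unwrap().public().clone();
	let json = serde_json::to_string(&SerializablePublic::from(public.clone())).unwrap();
	let restored: SerializablePublic = serde_json::from_str(&json).unwrap();
	assert_eq!(*restored, public);
}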

View File

@ -19,6 +19,8 @@ use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey}
#[ipc(client_ident="RemoteKeyServer")]
/// Secret store key server
pub trait KeyServer: Send + Sync {
/// Generate encryption key for given document.
fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result<DocumentEncryptedKey, Error>;
/// Request encryption key of given document for given requestor
fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result<DocumentEncryptedKey, Error>;
}

View File

@ -15,10 +15,14 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt;
use std::collections::BTreeMap;
use ethkey;
use util;
use key_server_cluster;
/// Node id.
pub type NodeId = ethkey::Public;
/// Document address type.
pub type DocumentAddress = util::H256;
/// Document key type.
@ -46,16 +50,53 @@ pub enum Error {
Internal(String),
}
#[derive(Debug)]
#[binary]
/// Secret store configuration
pub struct NodeAddress {
/// IP address.
pub address: String,
/// IP port.
pub port: u16,
}
#[derive(Debug)]
#[binary]
/// Secret store configuration
pub struct ServiceConfiguration {
/// Interface to listen to /// HTTP listener address.
pub listener_addr: String, pub listener_address: NodeAddress,
/// Port to listen to
pub listener_port: u16,
/// Data directory path for secret store
pub data_path: String,
/// Cluster configuration.
pub cluster_config: ClusterConfiguration,
}
#[derive(Debug)]
#[binary]
/// Key server cluster configuration
pub struct ClusterConfiguration {
/// Number of threads reserved by cluster.
pub threads: usize,
/// Private key this node holds.
pub self_private: Vec<u8>, // holds ethkey::Secret
/// This node address.
pub listener_address: NodeAddress,
/// All cluster nodes addresses.
pub nodes: BTreeMap<ethkey::Public, NodeAddress>,
/// Allow outbound connections to 'higher' nodes.
/// This is useful for tests, but slower a bit for production.
pub allow_connecting_to_higher_nodes: bool,
/// Encryption session configuration.
pub encryption_config: EncryptionConfiguration,
}
#[derive(Clone, Debug)]
#[binary]
/// Encryption parameters.
pub struct EncryptionConfiguration {
/// Key check timeout.
pub key_check_timeout_ms: u64,
}
impl fmt::Display for Error {
@ -70,6 +111,18 @@ impl fmt::Display for Error {
}
}
impl From<ethkey::Error> for Error {
fn from(err: ethkey::Error) -> Self {
Error::Internal(err.into())
}
}
impl From<key_server_cluster::Error> for Error {
fn from(err: key_server_cluster::Error) -> Self {
Error::Internal(err.into())
}
}
impl Into<String> for Error {
fn into(self) -> String {
format!("{}", self)