Update jsonrpc dependencies and rewrite dapps to futures. (#6522)

* Bump version.

* Fix RPC crate.

* Fix BoxFuture in crates.

* Compiles and passes tests!

* Get rid of .boxed()

* Fixing issues with the UI.

* Remove minihttp. Support threads.

* Reimplement files serving to do it in chunks.

* Increase chunk size.

* Remove some unnecessary copying.

* Fix tests.

* Fix stratum warning and ipfs todo.

* Switch to proper branch of jsonrpc.

* Update Cargo.lock.

* Update docs.

* Include dapps-glue in workspace.

* fixed merge artifacts

* Fix test compilation.
This commit is contained in:
Tomasz Drwięga 2017-10-05 12:35:01 +02:00 committed by Arkadiy Paronyan
parent 492da38d67
commit e8b418ca03
118 changed files with 2090 additions and 2908 deletions

384
Cargo.lock generated
View File

@ -39,11 +39,6 @@ name = "ansi_term"
version = "0.9.0" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "antidote"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "app_dirs" name = "app_dirs"
version = "1.1.1" version = "1.1.1"
@ -292,6 +287,14 @@ dependencies = [
"vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "clippy"
version = "0.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "clippy" name = "clippy"
version = "0.0.103" version = "0.0.103"
@ -309,6 +312,20 @@ dependencies = [
"clippy_lints 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)", "clippy_lints 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "clippy_lints"
version = "0.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "clippy_lints" name = "clippy_lints"
version = "0.0.103" version = "0.0.103"
@ -553,7 +570,6 @@ name = "ethcore"
version = "1.8.0" version = "1.8.0"
dependencies = [ dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bloomable 0.1.0", "bloomable 0.1.0",
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bn 0.4.4 (git+https://github.com/paritytech/bn)", "bn 0.4.4 (git+https://github.com/paritytech/bn)",
@ -561,8 +577,6 @@ dependencies = [
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"common-types 0.1.0", "common-types 0.1.0",
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.8.0", "ethash 1.8.0",
"ethcore-bigint 0.1.3", "ethcore-bigint 0.1.3",
"ethcore-bloom-journal 0.1.0", "ethcore-bloom-journal 0.1.0",
@ -579,7 +593,7 @@ dependencies = [
"ethkey 0.2.0", "ethkey 0.2.0",
"ethstore 0.1.0", "ethstore 0.1.0",
"evm 0.1.0", "evm 0.1.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"hardware-wallet 1.8.0", "hardware-wallet 1.8.0",
"hash 0.1.0", "hash 0.1.0",
"hashdb 0.1.0", "hashdb 0.1.0",
@ -587,7 +601,7 @@ dependencies = [
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"memorydb 0.1.0", "memorydb 0.1.0",
@ -605,7 +619,6 @@ dependencies = [
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"semantic_version 0.1.0", "semantic_version 0.1.0",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"stats 0.1.0", "stats 0.1.0",
"table 0.1.0", "table 0.1.0",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
@ -735,7 +748,7 @@ dependencies = [
"ethcore-network 1.8.0", "ethcore-network 1.8.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"evm 0.1.0", "evm 0.1.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
@ -820,7 +833,7 @@ dependencies = [
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"ethcrypto 0.1.0", "ethcrypto 0.1.0",
"ethkey 0.2.0", "ethkey 0.2.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)",
@ -852,16 +865,14 @@ dependencies = [
"ethcore-ipc-nano 1.8.0", "ethcore-ipc-nano 1.8.0",
"ethcore-logger 1.8.0", "ethcore-logger 1.8.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-tcp-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -1077,12 +1088,11 @@ dependencies = [
name = "fetch" name = "fetch"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"reqwest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -1116,7 +1126,7 @@ dependencies = [
[[package]] [[package]]
name = "futures" name = "futures"
version = "0.1.14" version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
@ -1124,7 +1134,7 @@ name = "futures-cpupool"
version = "0.1.6" version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -1269,13 +1279,39 @@ dependencies = [
] ]
[[package]] [[package]]
name = "hyper-native-tls" name = "hyper"
version = "0.2.4" version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hyper-tls"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
"native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-tls 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -1363,10 +1399,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "jsonrpc-core" name = "jsonrpc-core"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1375,25 +1411,24 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-http-server" name = "jsonrpc-http-server"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "jsonrpc-ipc-server" name = "jsonrpc-ipc-server"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)", "parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1401,47 +1436,32 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-macros" name = "jsonrpc-macros"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "jsonrpc-minihttp-server"
version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)",
"tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "jsonrpc-pubsub" name = "jsonrpc-pubsub"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "jsonrpc-server-utils" name = "jsonrpc-server-utils"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1449,25 +1469,23 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-tcp-server" name = "jsonrpc-tcp-server"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "jsonrpc-ws-server" name = "jsonrpc-ws-server"
version = "7.0.0" version = "8.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8#cf6f3481760f6ee8fbef7a987954ffc720ff4acf"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1533,12 +1551,12 @@ dependencies = [
[[package]] [[package]]
name = "linked-hash-map" name = "linked-hash-map"
version = "0.3.0" version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "linked-hash-map" name = "linked-hash-map"
version = "0.4.2" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
@ -1634,17 +1652,6 @@ dependencies = [
"unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "mime_guess"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
"phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "mime_guess" name = "mime_guess"
version = "2.0.0-alpha.2" version = "2.0.0-alpha.2"
@ -1775,7 +1782,7 @@ dependencies = [
"byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-bigint 0.1.3", "ethcore-bigint 0.1.3",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"native-contract-generator 0.1.0", "native-contract-generator 0.1.0",
] ]
@ -1828,7 +1835,7 @@ dependencies = [
"ethcore-io 1.8.0", "ethcore-io 1.8.0",
"ethcore-network 1.8.0", "ethcore-network 1.8.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"native-contracts 0.1.0", "native-contracts 0.1.0",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1838,7 +1845,7 @@ dependencies = [
name = "node-health" name = "node-health"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2053,12 +2060,12 @@ dependencies = [
"ethkey 0.2.0", "ethkey 0.2.0",
"ethsync 1.8.0", "ethsync 1.8.0",
"fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)",
"isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "isatty 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"node-filter 1.8.0", "node-filter 1.8.0",
"node-health 0.1.0", "node-health 0.1.0",
@ -2105,15 +2112,15 @@ dependencies = [
"ethcore-devtools 1.8.0", "ethcore-devtools 1.8.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"fetch 0.1.0", "fetch 0.1.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)",
"mime_guess 1.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"node-health 0.1.0", "node-health 0.1.0",
"parity-dapps-glue 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-hash-fetch 1.8.0", "parity-hash-fetch 1.8.0",
@ -2125,12 +2132,25 @@ dependencies = [
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"zip 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)", "zip 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "parity-dapps-glue"
version = "1.8.0"
dependencies = [
"aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)",
"quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"quasi_macros 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
"syntex_syntax 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "parity-dapps-glue" name = "parity-dapps-glue"
version = "1.8.0" version = "1.8.0"
@ -2154,11 +2174,11 @@ dependencies = [
"ethcore-bytes 0.1.0", "ethcore-bytes 0.1.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"fetch 0.1.0", "fetch 0.1.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"mime_guess 1.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)",
"native-contracts 0.1.0", "native-contracts 0.1.0",
"parity-reactor 0.1.0", "parity-reactor 0.1.0",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2175,10 +2195,11 @@ dependencies = [
"ethcore-bigint 0.1.3", "ethcore-bigint 0.1.3",
"ethcore-bytes 0.1.0", "ethcore-bytes 0.1.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.2.0", "rlp 0.2.0",
"unicase 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -2208,7 +2229,7 @@ dependencies = [
name = "parity-reactor" name = "parity-reactor"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -2236,18 +2257,16 @@ dependencies = [
"ethstore 0.1.0", "ethstore 0.1.0",
"ethsync 1.8.0", "ethsync 1.8.0",
"fetch 0.1.0", "fetch 0.1.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"hardware-wallet 1.8.0", "hardware-wallet 1.8.0",
"hash 0.1.0", "hash 0.1.0",
"itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ipc-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"node-health 0.1.0", "node-health 0.1.0",
@ -2275,19 +2294,16 @@ dependencies = [
name = "parity-rpc-client" name = "parity-rpc-client"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"ethcore-util 1.8.0", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"hash 0.1.0", "hash 0.1.0",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-rpc 1.8.0", "parity-rpc 1.8.0",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -2297,7 +2313,7 @@ version = "0.1.5"
source = "git+https://github.com/nikvolf/parity-tokio-ipc#d6c5b3cfcc913a1b9cf0f0562a10b083ceb9fb7c" source = "git+https://github.com/nikvolf/parity-tokio-ipc#d6c5b3cfcc913a1b9cf0f0562a10b083ceb9fb7c"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2344,7 +2360,7 @@ dependencies = [
"ethcore-ipc-codegen 1.8.0", "ethcore-ipc-codegen 1.8.0",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"ethsync 1.8.0", "ethsync 1.8.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"ipc-common-types 1.8.0", "ipc-common-types 1.8.0",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-hash-fetch 1.8.0", "parity-hash-fetch 1.8.0",
@ -2374,11 +2390,10 @@ dependencies = [
"ethcore-network 1.8.0", "ethcore-network 1.8.0",
"ethcrypto 0.1.0", "ethcrypto 0.1.0",
"ethkey 0.2.0", "ethkey 0.2.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ordered-float 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "ordered-float 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2518,7 +2533,7 @@ name = "price-info"
version = "1.7.0" version = "1.7.0"
dependencies = [ dependencies = [
"fetch 0.1.0", "fetch 0.1.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2662,7 +2677,7 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2698,16 +2713,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "reqwest" name = "reqwest"
version = "0.6.2" version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper-native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libflate 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "libflate 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_urlencoded 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-tls 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -2802,7 +2823,7 @@ version = "1.4.0"
dependencies = [ dependencies = [
"bigint 4.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 4.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-util 1.8.0", "ethcore-util 1.8.0",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-rpc 1.8.0", "parity-rpc 1.8.0",
"parity-rpc-client 1.4.0", "parity-rpc-client 1.4.0",
"rpassword 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rpassword 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3228,7 +3249,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3243,61 +3264,28 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "tokio-minihttp"
version = "0.1.0"
source = "git+https://github.com/tomusdrw/tokio-minihttp#67a400060bd29e51beaf206c552845255b6f699f"
dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "tokio-named-pipes" name = "tokio-named-pipes"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/nikvolf/tokio-named-pipes#0b9b728eaeb0a6673c287ac7692be398fd651752" source = "git+https://github.com/nikvolf/tokio-named-pipes#0b9b728eaeb0a6673c287ac7692be398fd651752"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "tokio-proto"
version = "0.1.0"
source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3"
dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "tokio-proto" name = "tokio-proto"
version = "0.1.1" version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3314,7 +3302,7 @@ name = "tokio-service"
version = "0.1.0" version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -3322,17 +3310,28 @@ name = "tokio-timer"
version = "0.1.2" version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "tokio-tls"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"native-tls 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "tokio-uds" name = "tokio-uds"
version = "0.1.5" version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3622,7 +3621,6 @@ dependencies = [
"checksum advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e06588080cb19d0acb6739808aafa5f26bfb2ca015b2b6370028b44cf7cb8a9a" "checksum advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e06588080cb19d0acb6739808aafa5f26bfb2ca015b2b6370028b44cf7cb8a9a"
"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" "checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699"
"checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6" "checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6"
"checksum antidote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5"
"checksum app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7d1c0d48a81bbb13043847f957971f4d87c81542d80ece5e84ba3cba4058fd4" "checksum app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7d1c0d48a81bbb13043847f957971f4d87c81542d80ece5e84ba3cba4058fd4"
"checksum arrayvec 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "699e63a93b79d717e8c3b5eb1b28b7780d0d6d9e59a72eb769291c83b0c8dc67" "checksum arrayvec 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "699e63a93b79d717e8c3b5eb1b28b7780d0d6d9e59a72eb769291c83b0c8dc67"
"checksum aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0" "checksum aster 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfdf7355d9db158df68f976ed030ab0f6578af811f5a7bb6dcf221ec24e0e0"
@ -3653,8 +3651,10 @@ dependencies = [
"checksum clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3451e409013178663435d6f15fdb212f14ee4424a3d74f979d081d0a66b6f1f2" "checksum clap 2.26.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3451e409013178663435d6f15fdb212f14ee4424a3d74f979d081d0a66b6f1f2"
"checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32"
"checksum clippy 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)" = "5ad3f3dc94d81a6505eb28bf545b501fc9d7525ee9864df5a4b2b6d82629f038" "checksum clippy 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)" = "5ad3f3dc94d81a6505eb28bf545b501fc9d7525ee9864df5a4b2b6d82629f038"
"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b"
"checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a" "checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a"
"checksum clippy_lints 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)" = "c058b299bb1289c7e8c063bd49477715c91cb3c3344bcf2e25326860b0675654" "checksum clippy_lints 0.0.163 (registry+https://github.com/rust-lang/crates.io-index)" = "c058b299bb1289c7e8c063bd49477715c91cb3c3344bcf2e25326860b0675654"
"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96"
"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd" "checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
"checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299" "checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299"
"checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591" "checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591"
@ -3682,7 +3682,7 @@ dependencies = [
"checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" "checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344"
"checksum foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d" "checksum foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d"
"checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866" "checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866"
"checksum futures 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4b63a4792d4f8f686defe3b39b92127fea6344de5d38202b2ee5a11bbbf29d6a" "checksum futures 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "05a23db7bd162d4e8265968602930c476f688f0c180b44bdaf55e0cb2c687558"
"checksum futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "77d49e7de8b91b20d6fda43eea906637eff18b96702eb6b2872df8bfab1ad2b5" "checksum futures-cpupool 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "77d49e7de8b91b20d6fda43eea906637eff18b96702eb6b2872df8bfab1ad2b5"
"checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb" "checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb"
"checksum getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "65922871abd2f101a2eb0eaebadc66668e54a87ad9c3dd82520b5f86ede5eff9" "checksum getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "65922871abd2f101a2eb0eaebadc66668e54a87ad9c3dd82520b5f86ede5eff9"
@ -3696,7 +3696,8 @@ dependencies = [
"checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07"
"checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "<none>" "checksum hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)" = "<none>"
"checksum hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = "368cb56b2740ebf4230520e2b90ebb0461e69034d85d1945febd9b3971426db2" "checksum hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = "368cb56b2740ebf4230520e2b90ebb0461e69034d85d1945febd9b3971426db2"
"checksum hyper-native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "72332e4a35d3059583623b50e98e491b78f8b96c5521fcb3f428167955aa56e8" "checksum hyper 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "641abc3e3fcf0de41165595f801376e01106bca1fd876dda937730e477ca004c"
"checksum hyper-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c81fa95203e2a6087242c38691a0210f23e9f3f8f944350bd676522132e2985"
"checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" "checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d"
"checksum igd 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "356a0dc23a4fa0f8ce4777258085d00a01ea4923b2efd93538fc44bf5e1bda76" "checksum igd 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "356a0dc23a4fa0f8ce4777258085d00a01ea4923b2efd93538fc44bf5e1bda76"
"checksum integer-encoding 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a053c9c7dcb7db1f2aa012c37dc176c62e4cdf14898dee0eecc606de835b8acb" "checksum integer-encoding 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a053c9c7dcb7db1f2aa012c37dc176c62e4cdf14898dee0eecc606de835b8acb"
@ -3706,15 +3707,14 @@ dependencies = [
"checksum itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4833d6978da405305126af4ac88569b5d71ff758581ce5a987dbfa3755f694fc" "checksum itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4833d6978da405305126af4ac88569b5d71ff758581ce5a987dbfa3755f694fc"
"checksum itertools 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ab4d6a273c31ef276c917019239588b23bc696f277af8db10742cba3c27ec2f0" "checksum itertools 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ab4d6a273c31ef276c917019239588b23bc696f277af8db10742cba3c27ec2f0"
"checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" "checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c"
"checksum jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-core 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-ipc-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-server-utils 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-tcp-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.8)" = "<none>"
"checksum jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"
"checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf" "checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf"
@ -3723,8 +3723,8 @@ dependencies = [
"checksum libflate 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "a2aa04ec0100812d31a5366130ff9e793291787bc31da845bede4a00ea329830" "checksum libflate 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "a2aa04ec0100812d31a5366130ff9e793291787bc31da845bede4a00ea329830"
"checksum libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)" = "<none>" "checksum libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)" = "<none>"
"checksum libusb-sys 0.2.3 (git+https://github.com/paritytech/libusb-sys)" = "<none>" "checksum libusb-sys 0.2.3 (git+https://github.com/paritytech/libusb-sys)" = "<none>"
"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939" "checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939"
"checksum linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d2aab0478615bb586559b0114d94dd8eca4fdbb73b443adcb0d00b61692b4bf"
"checksum local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1ceb20f39ff7ae42f3ff9795f3986b1daad821caaa1e1732a0944103a5a1a66" "checksum local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1ceb20f39ff7ae42f3ff9795f3986b1daad821caaa1e1732a0944103a5a1a66"
"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" "checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
"checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21" "checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21"
@ -3735,7 +3735,6 @@ dependencies = [
"checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b" "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
"checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0"
"checksum mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e3d709ffbb330e1566dc2f2a3c9b58a5ad4a381f740b810cd305dc3f089bc160" "checksum mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e3d709ffbb330e1566dc2f2a3c9b58a5ad4a381f740b810cd305dc3f089bc160"
"checksum mime_guess 1.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bbee1a836f344ac39d4a59bfe7be2bd3150353ff71678afb740216f8270b333e"
"checksum mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)" = "27a5e6679a0614e25adc14c6434ba84e41632b765a6d9cb2031a0cca682699ae" "checksum mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)" = "27a5e6679a0614e25adc14c6434ba84e41632b765a6d9cb2031a0cca682699ae"
"checksum miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4" "checksum miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4"
"checksum mio 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "dbd91d3bfbceb13897065e97b2ef177a09a438cb33612b2d371bf568819a9313" "checksum mio 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "dbd91d3bfbceb13897065e97b2ef177a09a438cb33612b2d371bf568819a9313"
@ -3805,7 +3804,7 @@ dependencies = [
"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" "checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b"
"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" "checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db"
"checksum reqwest 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1d56dbe269dbe19d716b76ec8c3efce8ef84e974f5b7e5527463e8c0507d4e17" "checksum reqwest 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5866613d84e2a39c0479a960bf2d0eff1fbfc934f02cd42b5c08c1e1efc5b1fd"
"checksum ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)" = "24293de46bac74c9b9c05b40ff8496bbc8b9ae242a9b89f754e1154a43bc7c4c" "checksum ring 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)" = "24293de46bac74c9b9c05b40ff8496bbc8b9ae242a9b89f754e1154a43bc7c4c"
"checksum rocksdb 0.4.5 (git+https://github.com/paritytech/rust-rocksdb)" = "<none>" "checksum rocksdb 0.4.5 (git+https://github.com/paritytech/rust-rocksdb)" = "<none>"
"checksum rocksdb-sys 0.3.0 (git+https://github.com/paritytech/rust-rocksdb)" = "<none>" "checksum rocksdb-sys 0.3.0 (git+https://github.com/paritytech/rust-rocksdb)" = "<none>"
@ -3866,12 +3865,11 @@ dependencies = [
"checksum tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d52d12ad79e4063e0cb0ca5efa202ed7244b6ce4d25f4d3abe410b2a66128292" "checksum tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d52d12ad79e4063e0cb0ca5efa202ed7244b6ce4d25f4d3abe410b2a66128292"
"checksum tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e85d419699ec4b71bfe35bbc25bb8771e52eff0471a7f75c853ad06e200b4f86" "checksum tokio-core 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e85d419699ec4b71bfe35bbc25bb8771e52eff0471a7f75c853ad06e200b4f86"
"checksum tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af" "checksum tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af"
"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "<none>"
"checksum tokio-named-pipes 0.1.0 (git+https://github.com/nikvolf/tokio-named-pipes)" = "<none>" "checksum tokio-named-pipes 0.1.0 (git+https://github.com/nikvolf/tokio-named-pipes)" = "<none>"
"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "<none>"
"checksum tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389" "checksum tokio-proto 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389"
"checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162"
"checksum tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6131e780037787ff1b3f8aad9da83bca02438b72277850dd6ad0d455e0e20efc" "checksum tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6131e780037787ff1b3f8aad9da83bca02438b72277850dd6ad0d455e0e20efc"
"checksum tokio-tls 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d88e411cac1c87e405e4090be004493c5d8072a370661033b1a64ea205ec2e13"
"checksum tokio-uds 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6116c71be48f8f1656551fd16458247fdd6c03201d7893ad81189055fcde03e8" "checksum tokio-uds 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6116c71be48f8f1656551fd16458247fdd6c03201d7893ad81189055fcde03e8"
"checksum toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "0590d72182e50e879c4da3b11c6488dae18fccb1ae0c7a3eda18e16795844796" "checksum toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "0590d72182e50e879c4da3b11c6488dae18fccb1ae0c7a3eda18e16795844796"
"checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" "checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e"

View File

@ -31,7 +31,7 @@ futures-cpupool = "0.1"
fdlimit = "0.1" fdlimit = "0.1"
ws2_32-sys = "0.2" ws2_32-sys = "0.2"
ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" } ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
ethsync = { path = "sync" } ethsync = { path = "sync" }
ethcore = { path = "ethcore" } ethcore = { path = "ethcore" }
ethcore-util = { path = "util" } ethcore-util = { path = "util" }
@ -117,4 +117,4 @@ lto = false
panic = "abort" panic = "abort"
[workspace] [workspace]
members = ["ethstore/cli", "ethkey/cli", "evmbin", "whisper", "chainspec", "dapps/node-health"] members = ["ethstore/cli", "ethkey/cli", "evmbin", "whisper", "chainspec", "dapps/js-glue"]

View File

@ -10,25 +10,23 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
base32 = "0.3" base32 = "0.3"
futures = "0.1" futures = "0.1"
linked-hash-map = "0.3" futures-cpupool = "0.1"
linked-hash-map = "0.5"
log = "0.3" log = "0.3"
parity-dapps-glue = "1.7" parity-dapps-glue = "1.8"
parking_lot = "0.4" parking_lot = "0.4"
mime = "0.2" mime_guess = "2.0.0-alpha.2"
mime_guess = "1.6.1"
rand = "0.3" rand = "0.3"
rustc-hex = "1.0" rustc-hex = "1.0"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
time = "0.1.35" unicase = "1.4"
unicase = "1.3"
url = "1.0"
zip = { version = "0.1", default-features = false } zip = { version = "0.1", default-features = false }
itertools = "0.5" itertools = "0.5"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore-bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }

View File

@ -1,7 +1,7 @@
[package] [package]
description = "Base Package for all Parity built-in dapps" description = "Base Package for all Parity built-in dapps"
name = "parity-dapps-glue" name = "parity-dapps-glue"
version = "1.7.0" version = "1.8.0"
license = "GPL-3.0" license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs" build = "build.rs"
@ -12,7 +12,7 @@ syntex = { version = "0.58", optional = true }
[dependencies] [dependencies]
glob = { version = "0.2.11" } glob = { version = "0.2.11" }
mime_guess = { version = "1.6.1" } mime_guess = { version = "2.0.0-alpha.2" }
aster = { version = "0.41", default-features = false } aster = { version = "0.41", default-features = false }
quasi = { version = "0.32", default-features = false } quasi = { version = "0.32", default-features = false }
quasi_macros = { version = "0.32", optional = true } quasi_macros = { version = "0.32", optional = true }

View File

@ -18,7 +18,7 @@
use std::sync::Arc; use std::sync::Arc;
use std::time; use std::time;
use futures::{Future, BoxFuture}; use futures::Future;
use futures::sync::oneshot; use futures::sync::oneshot;
use types::{HealthInfo, HealthStatus, Health}; use types::{HealthInfo, HealthStatus, Health};
use time::{TimeChecker, MAX_DRIFT}; use time::{TimeChecker, MAX_DRIFT};
@ -44,7 +44,7 @@ impl NodeHealth {
} }
/// Query latest health report. /// Query latest health report.
pub fn health(&self) -> BoxFuture<Health, ()> { pub fn health(&self) -> Box<Future<Item = Health, Error = ()> + Send> {
trace!(target: "dapps", "Checking node health."); trace!(target: "dapps", "Checking node health.");
// Check timediff // Check timediff
let sync_status = self.sync_status.clone(); let sync_status = self.sync_status.clone();
@ -63,7 +63,7 @@ impl NodeHealth {
}, },
); );
rx.map_err(|err| { Box::new(rx.map_err(|err| {
warn!(target: "dapps", "Health request cancelled: {:?}", err); warn!(target: "dapps", "Health request cancelled: {:?}", err);
}).and_then(move |time| { }).and_then(move |time| {
// Check peers // Check peers
@ -117,6 +117,6 @@ impl NodeHealth {
}; };
Ok(Health { peers, sync, time}) Ok(Health { peers, sync, time})
}).boxed() }))
} }
} }

View File

@ -37,7 +37,7 @@ use std::collections::VecDeque;
use std::sync::atomic::{self, AtomicUsize}; use std::sync::atomic::{self, AtomicUsize};
use std::sync::Arc; use std::sync::Arc;
use futures::{self, Future, BoxFuture}; use futures::{self, Future};
use futures::future::{self, IntoFuture}; use futures::future::{self, IntoFuture};
use futures_cpupool::{CpuPool, CpuFuture}; use futures_cpupool::{CpuPool, CpuFuture};
use ntp; use ntp;
@ -195,6 +195,8 @@ const UPDATE_TIMEOUT_INCOMPLETE_SECS: u64 = 10;
/// Maximal valid time drift. /// Maximal valid time drift.
pub const MAX_DRIFT: i64 = 500; pub const MAX_DRIFT: i64 = 500;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
/// A time checker. /// A time checker.
pub struct TimeChecker<N: Ntp = SimpleNtp> { pub struct TimeChecker<N: Ntp = SimpleNtp> {
@ -224,7 +226,7 @@ impl<N: Ntp> TimeChecker<N> where <N::Future as IntoFuture>::Future: Send + 'sta
pub fn update(&self) -> BoxFuture<i64, Error> { pub fn update(&self) -> BoxFuture<i64, Error> {
trace!(target: "dapps", "Updating time from NTP."); trace!(target: "dapps", "Updating time from NTP.");
let last_result = self.last_result.clone(); let last_result = self.last_result.clone();
self.ntp.drift().into_future().then(move |res| { Box::new(self.ntp.drift().into_future().then(move |res| {
let res = res.map(|d| d.num_milliseconds()); let res = res.map(|d| d.num_milliseconds());
if let Err(Error::NoServersAvailable) = res { if let Err(Error::NoServersAvailable) = res {
@ -255,7 +257,7 @@ impl<N: Ntp> TimeChecker<N> where <N::Future as IntoFuture>::Future: Send + 'sta
let res = select_result(results.iter()); let res = select_result(results.iter());
*last_result.write() = (valid_till, results); *last_result.write() = (valid_till, results);
res res
}).boxed() }))
} }
/// Returns a current time drift or error if last request to NTP server failed. /// Returns a current time drift or error if last request to NTP server failed.
@ -264,7 +266,7 @@ impl<N: Ntp> TimeChecker<N> where <N::Future as IntoFuture>::Future: Send + 'sta
{ {
let res = self.last_result.read(); let res = self.last_result.read();
if res.0 > time::Instant::now() { if res.0 > time::Instant::now() {
return futures::done(select_result(res.1.iter())).boxed(); return Box::new(futures::done(select_result(res.1.iter())));
} }
} }
// or update and return result // or update and return result

View File

@ -16,144 +16,82 @@
use std::sync::Arc; use std::sync::Arc;
use hyper::{server, net, Decoder, Encoder, Next, Control}; use hyper::{Method, StatusCode};
use hyper::method::Method;
use hyper::status::StatusCode;
use api::{response, types}; use api::response;
use apps::fetcher::Fetcher; use apps::fetcher::Fetcher;
use handlers::{self, extract_url}; use endpoint::{Endpoint, Request, Response, EndpointPath};
use endpoint::{Endpoint, Handler, EndpointPath}; use futures::{future, Future};
use node_health::{NodeHealth, HealthStatus, Health}; use node_health::{NodeHealth, HealthStatus};
use parity_reactor::Remote;
#[derive(Clone)] #[derive(Clone)]
pub struct RestApi { pub struct RestApi {
fetcher: Arc<Fetcher>, fetcher: Arc<Fetcher>,
health: NodeHealth, health: NodeHealth,
remote: Remote, }
impl Endpoint for RestApi {
fn respond(&self, mut path: EndpointPath, req: Request) -> Response {
if let Method::Options = *req.method() {
return Box::new(future::ok(response::empty()));
}
let endpoint = path.app_params.get(0).map(String::to_owned);
let hash = path.app_params.get(1).map(String::to_owned);
// at this point path.app_id contains 'api', adjust it to the hash properly, otherwise
// we will try and retrieve 'api' as the hash when doing the /api/content route
if let Some(ref hash) = hash {
path.app_id = hash.to_owned();
}
trace!(target: "dapps", "Handling /api request: {:?}/{:?}", endpoint, hash);
match endpoint.as_ref().map(String::as_str) {
Some("ping") => Box::new(future::ok(response::ping(req))),
Some("health") => self.health(),
Some("content") => self.resolve_content(hash.as_ref().map(String::as_str), path, req),
_ => Box::new(future::ok(response::not_found())),
}
}
} }
impl RestApi { impl RestApi {
pub fn new( pub fn new(
fetcher: Arc<Fetcher>, fetcher: Arc<Fetcher>,
health: NodeHealth, health: NodeHealth,
remote: Remote,
) -> Box<Endpoint> { ) -> Box<Endpoint> {
Box::new(RestApi { Box::new(RestApi {
fetcher, fetcher,
health, health,
remote,
}) })
} }
}
impl Endpoint for RestApi { fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, req: Request) -> Response {
fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box<Handler> {
Box::new(RestApiRouter::new((*self).clone(), path, control))
}
}
struct RestApiRouter {
api: RestApi,
path: Option<EndpointPath>,
control: Option<Control>,
handler: Box<Handler>,
}
impl RestApiRouter {
fn new(api: RestApi, path: EndpointPath, control: Control) -> Self {
RestApiRouter {
path: Some(path),
control: Some(control),
api: api,
handler: Box::new(response::as_json_error(StatusCode::NotFound, &types::ApiError {
code: "404".into(),
title: "Not Found".into(),
detail: "Resource you requested has not been found.".into(),
})),
}
}
fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option<Box<Handler>> {
trace!(target: "dapps", "Resolving content: {:?} from path: {:?}", hash, path); trace!(target: "dapps", "Resolving content: {:?} from path: {:?}", hash, path);
match hash { match hash {
Some(hash) if self.api.fetcher.contains(hash) => { Some(hash) if self.fetcher.contains(hash) => {
Some(self.api.fetcher.to_async_handler(path, control)) self.fetcher.respond(path, req)
}, },
_ => None _ => Box::new(future::ok(response::not_found())),
} }
} }
fn health(&self, control: Control) -> Box<Handler> { fn health(&self) -> Response {
let map = move |health: Result<Result<Health, ()>, ()>| { Box::new(self.health.health()
let status = match health { .then(|health| {
Ok(Ok(ref health)) => { let status = match health {
if [&health.peers.status, &health.sync.status].iter().any(|x| *x != &HealthStatus::Ok) { Ok(ref health) => {
StatusCode::PreconditionFailed // HTTP 412 if [&health.peers.status, &health.sync.status].iter().any(|x| *x != &HealthStatus::Ok) {
} else { StatusCode::PreconditionFailed // HTTP 412
StatusCode::Ok // HTTP 200 } else {
} StatusCode::Ok // HTTP 200
}, }
_ => StatusCode::ServiceUnavailable, // HTTP 503 },
}; _ => StatusCode::ServiceUnavailable, // HTTP 503
};
response::as_json(status, &health) Ok(response::as_json(status, &health).into())
}; })
let health = self.api.health.health(); )
let remote = self.api.remote.clone();
Box::new(handlers::AsyncHandler::new(health, map, remote, control))
}
}
impl server::Handler<net::HttpStream> for RestApiRouter {
fn on_request(&mut self, request: server::Request<net::HttpStream>) -> Next {
if let Method::Options = *request.method() {
self.handler = response::empty();
return Next::write();
}
// TODO [ToDr] Consider using `path.app_params` instead
let url = extract_url(&request);
if url.is_none() {
// Just return 404 if we can't parse URL
return Next::write();
}
let url = url.expect("Check for None early-exists above; qed");
let mut path = self.path.take().expect("on_request called only once, and path is always defined in new; qed");
let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed");
let endpoint = url.path.get(1).map(|v| v.as_str());
let hash = url.path.get(2).map(|v| v.as_str());
// at this point path.app_id contains 'api', adjust it to the hash properly, otherwise
// we will try and retrieve 'api' as the hash when doing the /api/content route
if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() }
let handler = endpoint.and_then(|v| match v {
"ping" => Some(response::ping()),
"health" => Some(self.health(control)),
"content" => self.resolve_content(hash, path, control),
_ => None
});
// Overwrite default
if let Some(h) = handler {
self.handler = h;
}
self.handler.on_request(request)
}
fn on_request_readable(&mut self, decoder: &mut Decoder<net::HttpStream>) -> Next {
self.handler.on_request_readable(decoder)
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
self.handler.on_response(res)
}
fn on_response_writable(&mut self, encoder: &mut Encoder<net::HttpStream>) -> Next {
self.handler.on_response_writable(encoder)
} }
} }

View File

@ -16,27 +16,28 @@
use serde::Serialize; use serde::Serialize;
use serde_json; use serde_json;
use hyper::status::StatusCode; use hyper::{self, mime, StatusCode};
use endpoint::Handler;
use handlers::{ContentHandler, EchoHandler}; use handlers::{ContentHandler, EchoHandler};
pub fn empty() -> Box<Handler> { pub fn empty() -> hyper::Response {
Box::new(ContentHandler::ok("".into(), mime!(Text/Plain))) ContentHandler::ok("".into(), mime::TEXT_PLAIN).into()
} }
pub fn as_json<T: Serialize>(status: StatusCode, val: &T) -> ContentHandler { pub fn as_json<T: Serialize>(status: StatusCode, val: &T) -> hyper::Response {
let json = serde_json::to_string(val) let json = serde_json::to_string(val)
.expect("serialization to string is infallible; qed"); .expect("serialization to string is infallible; qed");
ContentHandler::new(status, json, mime!(Application/Json)) ContentHandler::new(status, json, mime::APPLICATION_JSON).into()
} }
pub fn as_json_error<T: Serialize>(status: StatusCode, val: &T) -> ContentHandler { pub fn ping(req: hyper::Request) -> hyper::Response {
let json = serde_json::to_string(val) EchoHandler::new(req).into()
.expect("serialization to string is infallible; qed");
ContentHandler::new(status, json, mime!(Application/Json))
} }
pub fn ping() -> Box<Handler> { pub fn not_found() -> hyper::Response {
Box::new(EchoHandler::default()) as_json(StatusCode::NotFound, &::api::types::ApiError {
code: "404".into(),
title: "Not Found".into(),
detail: "Resource you requested has not been found.".into(),
})
} }

View File

@ -19,12 +19,12 @@
use std::fs; use std::fs;
use linked_hash_map::LinkedHashMap; use linked_hash_map::LinkedHashMap;
use page::LocalPageEndpoint; use page::local;
use handlers::FetchControl; use handlers::FetchControl;
pub enum ContentStatus { pub enum ContentStatus {
Fetching(FetchControl), Fetching(FetchControl),
Ready(LocalPageEndpoint), Ready(local::Dapp),
} }
#[derive(Default)] #[derive(Default)]

View File

@ -18,16 +18,17 @@ use zip;
use std::{fs, fmt}; use std::{fs, fmt};
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::path::PathBuf; use std::path::PathBuf;
use fetch::{self, Mime};
use hash::keccak_buffer;
use bigint::hash::H256; use bigint::hash::H256;
use fetch::{self, Mime};
use futures_cpupool::CpuPool;
use hash::keccak_buffer;
use page::{LocalPageEndpoint, PageCache};
use handlers::{ContentValidator, ValidatorResponse};
use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest};
use handlers::{ContentValidator, ValidatorResponse};
use page::{local, PageCache};
use Embeddable; use Embeddable;
type OnDone = Box<Fn(Option<LocalPageEndpoint>) + Send>; type OnDone = Box<Fn(Option<local::Dapp>) + Send>;
fn write_response_and_check_hash( fn write_response_and_check_hash(
id: &str, id: &str,
@ -75,15 +76,17 @@ pub struct Content {
mime: Mime, mime: Mime,
content_path: PathBuf, content_path: PathBuf,
on_done: OnDone, on_done: OnDone,
pool: CpuPool,
} }
impl Content { impl Content {
pub fn new(id: String, mime: Mime, content_path: PathBuf, on_done: OnDone) -> Self { pub fn new(id: String, mime: Mime, content_path: PathBuf, on_done: OnDone, pool: CpuPool) -> Self {
Content { Content {
id: id, id,
mime: mime, mime,
content_path: content_path, content_path,
on_done: on_done, on_done,
pool,
} }
} }
} }
@ -91,12 +94,15 @@ impl Content {
impl ContentValidator for Content { impl ContentValidator for Content {
type Error = ValidationError; type Error = ValidationError;
fn validate_and_install(&self, response: fetch::Response) -> Result<ValidatorResponse, ValidationError> { fn validate_and_install(self, response: fetch::Response) -> Result<ValidatorResponse, ValidationError> {
let validate = |content_path: PathBuf| { let pool = self.pool;
let id = self.id.clone();
let mime = self.mime;
let validate = move |content_path: PathBuf| {
// Create dir // Create dir
let (_, content_path) = write_response_and_check_hash(self.id.as_str(), content_path.clone(), self.id.as_str(), response)?; let (_, content_path) = write_response_and_check_hash(&id, content_path, &id, response)?;
Ok(LocalPageEndpoint::single_file(content_path, self.mime.clone(), PageCache::Enabled)) Ok(local::Dapp::single_file(pool, content_path, mime, PageCache::Enabled))
}; };
// Prepare path for a file // Prepare path for a file
@ -118,15 +124,17 @@ pub struct Dapp {
dapps_path: PathBuf, dapps_path: PathBuf,
on_done: OnDone, on_done: OnDone,
embeddable_on: Embeddable, embeddable_on: Embeddable,
pool: CpuPool,
} }
impl Dapp { impl Dapp {
pub fn new(id: String, dapps_path: PathBuf, on_done: OnDone, embeddable_on: Embeddable) -> Self { pub fn new(id: String, dapps_path: PathBuf, on_done: OnDone, embeddable_on: Embeddable, pool: CpuPool) -> Self {
Dapp { Dapp {
id, id,
dapps_path, dapps_path,
on_done, on_done,
embeddable_on, embeddable_on,
pool,
} }
} }
@ -158,16 +166,19 @@ impl Dapp {
impl ContentValidator for Dapp { impl ContentValidator for Dapp {
type Error = ValidationError; type Error = ValidationError;
fn validate_and_install(&self, response: fetch::Response) -> Result<ValidatorResponse, ValidationError> { fn validate_and_install(self, response: fetch::Response) -> Result<ValidatorResponse, ValidationError> {
let validate = |dapp_path: PathBuf| { let id = self.id.clone();
let (file, zip_path) = write_response_and_check_hash(self.id.as_str(), dapp_path.clone(), &format!("{}.zip", self.id), response)?; let pool = self.pool;
let embeddable_on = self.embeddable_on;
let validate = move |dapp_path: PathBuf| {
let (file, zip_path) = write_response_and_check_hash(&id, dapp_path.clone(), &format!("{}.zip", id), response)?;
trace!(target: "dapps", "Opening dapp bundle at {:?}", zip_path); trace!(target: "dapps", "Opening dapp bundle at {:?}", zip_path);
// Unpack archive // Unpack archive
let mut zip = zip::ZipArchive::new(file)?; let mut zip = zip::ZipArchive::new(file)?;
// First find manifest file // First find manifest file
let (mut manifest, manifest_dir) = Self::find_manifest(&mut zip)?; let (mut manifest, manifest_dir) = Self::find_manifest(&mut zip)?;
// Overwrite id to match hash // Overwrite id to match hash
manifest.id = self.id.clone(); manifest.id = id;
// Unpack zip // Unpack zip
for i in 0..zip.len() { for i in 0..zip.len() {
@ -198,7 +209,7 @@ impl ContentValidator for Dapp {
let mut manifest_file = fs::File::create(manifest_path)?; let mut manifest_file = fs::File::create(manifest_path)?;
manifest_file.write_all(manifest_str.as_bytes())?; manifest_file.write_all(manifest_str.as_bytes())?;
// Create endpoint // Create endpoint
let endpoint = LocalPageEndpoint::new(dapp_path, manifest.clone().into(), PageCache::Enabled, self.embeddable_on.clone()); let endpoint = local::Dapp::new(pool, dapp_path, manifest.into(), PageCache::Enabled, embeddable_on);
Ok(endpoint) Ok(endpoint)
}; };

View File

@ -24,27 +24,25 @@ use std::{fs, env};
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use rustc_hex::FromHex; use rustc_hex::FromHex;
use futures::{future, Future};
use futures_cpupool::CpuPool;
use fetch::{Client as FetchClient, Fetch}; use fetch::{Client as FetchClient, Fetch};
use hash_fetch::urlhint::{URLHintContract, URLHint, URLHintResult}; use hash_fetch::urlhint::{URLHintContract, URLHint, URLHintResult};
use parity_reactor::Remote;
use hyper; use hyper::StatusCode;
use hyper::status::StatusCode;
use {Embeddable, SyncStatus, random_filename}; use {Embeddable, SyncStatus, random_filename};
use parking_lot::Mutex; use parking_lot::Mutex;
use page::LocalPageEndpoint; use page::local;
use handlers::{ContentHandler, ContentFetcherHandler}; use handlers::{ContentHandler, ContentFetcherHandler};
use endpoint::{Endpoint, EndpointPath, Handler}; use endpoint::{self, Endpoint, EndpointPath};
use apps::cache::{ContentCache, ContentStatus}; use apps::cache::{ContentCache, ContentStatus};
/// Limit of cached dapps/content /// Limit of cached dapps/content
const MAX_CACHED_DAPPS: usize = 20; const MAX_CACHED_DAPPS: usize = 20;
pub trait Fetcher: Send + Sync + 'static { pub trait Fetcher: Endpoint + 'static {
fn contains(&self, content_id: &str) -> bool; fn contains(&self, content_id: &str) -> bool;
fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler>;
} }
pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHintContract> { pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHintContract> {
@ -53,8 +51,8 @@ pub struct ContentFetcher<F: Fetch = FetchClient, R: URLHint + 'static = URLHint
cache: Arc<Mutex<ContentCache>>, cache: Arc<Mutex<ContentCache>>,
sync: Arc<SyncStatus>, sync: Arc<SyncStatus>,
embeddable_on: Embeddable, embeddable_on: Embeddable,
remote: Remote,
fetch: F, fetch: F,
pool: CpuPool,
only_content: bool, only_content: bool,
} }
@ -66,24 +64,23 @@ impl<R: URLHint + 'static, F: Fetch> Drop for ContentFetcher<F, R> {
} }
impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> { impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {
pub fn new( pub fn new(
resolver: R, resolver: R,
sync_status: Arc<SyncStatus>, sync: Arc<SyncStatus>,
remote: Remote,
fetch: F, fetch: F,
pool: CpuPool,
) -> Self { ) -> Self {
let mut cache_path = env::temp_dir(); let mut cache_path = env::temp_dir();
cache_path.push(random_filename()); cache_path.push(random_filename());
ContentFetcher { ContentFetcher {
cache_path: cache_path, cache_path,
resolver: resolver, resolver,
sync: sync_status, sync,
cache: Arc::new(Mutex::new(ContentCache::default())), cache: Arc::new(Mutex::new(ContentCache::default())),
embeddable_on: None, embeddable_on: None,
remote: remote, fetch,
fetch: fetch, pool,
only_content: true, only_content: true,
} }
} }
@ -98,24 +95,34 @@ impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {
self self
} }
fn still_syncing(embeddable: Embeddable) -> Box<Handler> { fn not_found(embeddable: Embeddable) -> endpoint::Response {
Box::new(ContentHandler::error( Box::new(future::ok(ContentHandler::error(
StatusCode::NotFound,
"Resource Not Found",
"Requested resource was not found.",
None,
embeddable,
).into()))
}
fn still_syncing(embeddable: Embeddable) -> endpoint::Response {
Box::new(future::ok(ContentHandler::error(
StatusCode::ServiceUnavailable, StatusCode::ServiceUnavailable,
"Sync In Progress", "Sync In Progress",
"Your node is still syncing. We cannot resolve any content before it's fully synced.", "Your node is still syncing. We cannot resolve any content before it's fully synced.",
Some("<a href=\"javascript:window.location.reload()\">Refresh</a>"), Some("<a href=\"javascript:window.location.reload()\">Refresh</a>"),
embeddable, embeddable,
)) ).into()))
} }
fn dapps_disabled(address: Embeddable) -> Box<Handler> { fn dapps_disabled(address: Embeddable) -> endpoint::Response {
Box::new(ContentHandler::error( Box::new(future::ok(ContentHandler::error(
StatusCode::ServiceUnavailable, StatusCode::ServiceUnavailable,
"Network Dapps Not Available", "Network Dapps Not Available",
"This interface doesn't support network dapps for security reasons.", "This interface doesn't support network dapps for security reasons.",
None, None,
address, address,
)) ).into()))
} }
#[cfg(test)] #[cfg(test)]
@ -126,8 +133,6 @@ impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {
// resolve contract call synchronously. // resolve contract call synchronously.
// TODO: port to futures-based hyper and make it all async. // TODO: port to futures-based hyper and make it all async.
fn resolve(&self, content_id: Vec<u8>) -> Option<URLHintResult> { fn resolve(&self, content_id: Vec<u8>) -> Option<URLHintResult> {
use futures::Future;
self.resolver.resolve(content_id) self.resolver.resolve(content_id)
.wait() .wait()
.unwrap_or_else(|e| { warn!("Error resolving content-id: {}", e); None }) .unwrap_or_else(|e| { warn!("Error resolving content-id: {}", e); None })
@ -151,8 +156,10 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
false false
} }
} }
}
fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler> { impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
fn respond(&self, path: EndpointPath, req: endpoint::Request) -> endpoint::Response {
let mut cache = self.cache.lock(); let mut cache = self.cache.lock();
let content_id = path.app_id.clone(); let content_id = path.app_id.clone();
@ -161,12 +168,12 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
match status { match status {
// Just serve the content // Just serve the content
Some(&mut ContentStatus::Ready(ref endpoint)) => { Some(&mut ContentStatus::Ready(ref endpoint)) => {
(None, endpoint.to_async_handler(path, control)) (None, endpoint.to_response(&path))
}, },
// Content is already being fetched // Content is already being fetched
Some(&mut ContentStatus::Fetching(ref fetch_control)) if !fetch_control.is_deadline_reached() => { Some(&mut ContentStatus::Fetching(ref fetch_control)) if !fetch_control.is_deadline_reached() => {
trace!(target: "dapps", "Content fetching in progress. Waiting..."); trace!(target: "dapps", "Content fetching in progress. Waiting...");
(None, fetch_control.to_async_handler(path, control)) (None, fetch_control.to_response(path))
}, },
// We need to start fetching the content // We need to start fetching the content
_ => { _ => {
@ -176,7 +183,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
let cache = self.cache.clone(); let cache = self.cache.clone();
let id = content_id.clone(); let id = content_id.clone();
let on_done = move |result: Option<LocalPageEndpoint>| { let on_done = move |result: Option<local::Dapp>| {
let mut cache = cache.lock(); let mut cache = cache.lock();
match result { match result {
Some(endpoint) => cache.insert(id.clone(), ContentStatus::Ready(endpoint)), Some(endpoint) => cache.insert(id.clone(), ContentStatus::Ready(endpoint)),
@ -195,39 +202,39 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
}, },
Some(URLHintResult::Dapp(dapp)) => { Some(URLHintResult::Dapp(dapp)) => {
let handler = ContentFetcherHandler::new( let handler = ContentFetcherHandler::new(
dapp.url(), req.method(),
&dapp.url(),
path, path,
control,
installers::Dapp::new( installers::Dapp::new(
content_id.clone(), content_id.clone(),
self.cache_path.clone(), self.cache_path.clone(),
Box::new(on_done), Box::new(on_done),
self.embeddable_on.clone(), self.embeddable_on.clone(),
self.pool.clone(),
), ),
self.embeddable_on.clone(), self.embeddable_on.clone(),
self.remote.clone(),
self.fetch.clone(), self.fetch.clone(),
); );
(Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as Box<Handler>) (Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as endpoint::Response)
}, },
Some(URLHintResult::Content(content)) => { Some(URLHintResult::Content(content)) => {
let handler = ContentFetcherHandler::new( let handler = ContentFetcherHandler::new(
content.url, req.method(),
&content.url,
path, path,
control,
installers::Content::new( installers::Content::new(
content_id.clone(), content_id.clone(),
content.mime, content.mime,
self.cache_path.clone(), self.cache_path.clone(),
Box::new(on_done), Box::new(on_done),
self.pool.clone(),
), ),
self.embeddable_on.clone(), self.embeddable_on.clone(),
self.remote.clone(),
self.fetch.clone(), self.fetch.clone(),
); );
(Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as Box<Handler>) (Some(ContentStatus::Fetching(handler.fetch_control())), Box::new(handler) as endpoint::Response)
}, },
None if self.sync.is_major_importing() => { None if self.sync.is_major_importing() => {
(None, Self::still_syncing(self.embeddable_on.clone())) (None, Self::still_syncing(self.embeddable_on.clone()))
@ -235,13 +242,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
None => { None => {
// This may happen when sync status changes in between // This may happen when sync status changes in between
// `contains` and `to_handler` // `contains` and `to_handler`
(None, Box::new(ContentHandler::error( (None, Self::not_found(self.embeddable_on.clone()))
StatusCode::NotFound,
"Resource Not Found",
"Requested resource was not found.",
None,
self.embeddable_on.clone(),
)) as Box<Handler>)
}, },
} }
}, },
@ -263,13 +264,12 @@ mod tests {
use std::sync::Arc; use std::sync::Arc;
use bytes::Bytes; use bytes::Bytes;
use fetch::{Fetch, Client}; use fetch::{Fetch, Client};
use futures::{future, Future, BoxFuture}; use futures::future;
use hash_fetch::urlhint::{URLHint, URLHintResult}; use hash_fetch::urlhint::{URLHint, URLHintResult, BoxFuture};
use parity_reactor::Remote;
use apps::cache::ContentStatus; use apps::cache::ContentStatus;
use endpoint::EndpointInfo; use endpoint::EndpointInfo;
use page::LocalPageEndpoint; use page::local;
use super::{ContentFetcher, Fetcher}; use super::{ContentFetcher, Fetcher};
use {SyncStatus}; use {SyncStatus};
@ -277,7 +277,7 @@ mod tests {
struct FakeResolver; struct FakeResolver;
impl URLHint for FakeResolver { impl URLHint for FakeResolver {
fn resolve(&self, _id: Bytes) -> BoxFuture<Option<URLHintResult>, String> { fn resolve(&self, _id: Bytes) -> BoxFuture<Option<URLHintResult>, String> {
future::ok(None).boxed() Box::new(future::ok(None))
} }
} }
@ -291,10 +291,16 @@ mod tests {
#[test] #[test]
fn should_true_if_contains_the_app() { fn should_true_if_contains_the_app() {
// given // given
let pool = ::futures_cpupool::CpuPool::new(1);
let path = env::temp_dir(); let path = env::temp_dir();
let fetcher = ContentFetcher::new(FakeResolver, Arc::new(FakeSync(false)), Remote::new_sync(), Client::new().unwrap()) let fetcher = ContentFetcher::new(
.allow_dapps(true); FakeResolver,
let handler = LocalPageEndpoint::new(path, EndpointInfo { Arc::new(FakeSync(false)),
Client::new().unwrap(),
pool.clone(),
).allow_dapps(true);
let handler = local::Dapp::new(pool, path, EndpointInfo {
name: "fake".into(), name: "fake".into(),
description: "".into(), description: "".into(),
version: "".into(), version: "".into(),

View File

@ -19,9 +19,11 @@ use std::io;
use std::io::Read; use std::io::Read;
use std::fs; use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use page::{LocalPageEndpoint, PageCache}; use futures_cpupool::CpuPool;
use endpoint::{Endpoint, EndpointInfo};
use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest};
use endpoint::{Endpoint, EndpointInfo};
use page::{local, PageCache};
use Embeddable; use Embeddable;
struct LocalDapp { struct LocalDapp {
@ -61,14 +63,14 @@ fn read_manifest(name: &str, mut path: PathBuf) -> EndpointInfo {
/// Returns Dapp Id and Local Dapp Endpoint for given filesystem path. /// Returns Dapp Id and Local Dapp Endpoint for given filesystem path.
/// Parses the path to extract last component (for name). /// Parses the path to extract last component (for name).
/// `None` is returned when path is invalid or non-existent. /// `None` is returned when path is invalid or non-existent.
pub fn local_endpoint<P: AsRef<Path>>(path: P, embeddable: Embeddable) -> Option<(String, Box<LocalPageEndpoint>)> { pub fn local_endpoint<P: AsRef<Path>>(path: P, embeddable: Embeddable, pool: CpuPool) -> Option<(String, Box<local::Dapp>)> {
let path = path.as_ref().to_owned(); let path = path.as_ref().to_owned();
path.canonicalize().ok().and_then(|path| { path.canonicalize().ok().and_then(|path| {
let name = path.file_name().and_then(|name| name.to_str()); let name = path.file_name().and_then(|name| name.to_str());
name.map(|name| { name.map(|name| {
let dapp = local_dapp(name.into(), path.clone()); let dapp = local_dapp(name.into(), path.clone());
(dapp.id, Box::new(LocalPageEndpoint::new( (dapp.id, Box::new(local::Dapp::new(
dapp.path, dapp.info, PageCache::Disabled, embeddable.clone()) pool.clone(), dapp.path, dapp.info, PageCache::Disabled, embeddable.clone())
)) ))
}) })
}) })
@ -86,13 +88,13 @@ fn local_dapp(name: String, path: PathBuf) -> LocalDapp {
} }
/// Returns endpoints for Local Dapps found for given filesystem path. /// Returns endpoints for Local Dapps found for given filesystem path.
/// Scans the directory and collects `LocalPageEndpoints`. /// Scans the directory and collects `local::Dapp`.
pub fn local_endpoints<P: AsRef<Path>>(dapps_path: P, embeddable: Embeddable) -> BTreeMap<String, Box<Endpoint>> { pub fn local_endpoints<P: AsRef<Path>>(dapps_path: P, embeddable: Embeddable, pool: CpuPool) -> BTreeMap<String, Box<Endpoint>> {
let mut pages = BTreeMap::<String, Box<Endpoint>>::new(); let mut pages = BTreeMap::<String, Box<Endpoint>>::new();
for dapp in local_dapps(dapps_path.as_ref()) { for dapp in local_dapps(dapps_path.as_ref()) {
pages.insert( pages.insert(
dapp.id, dapp.id,
Box::new(LocalPageEndpoint::new(dapp.path, dapp.info, PageCache::Disabled, embeddable.clone())) Box::new(local::Dapp::new(pool.clone(), dapp.path, dapp.info, PageCache::Disabled, embeddable.clone()))
); );
} }
pages pages

View File

@ -18,12 +18,12 @@ use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use endpoint::{Endpoints, Endpoint}; use endpoint::{Endpoints, Endpoint};
use page::PageEndpoint; use futures_cpupool::CpuPool;
use page;
use proxypac::ProxyPac; use proxypac::ProxyPac;
use web::Web; use web::Web;
use fetch::Fetch; use fetch::Fetch;
use parity_dapps::WebApp; use parity_dapps::WebApp;
use parity_reactor::Remote;
use parity_ui; use parity_ui;
use {WebProxyTokens, ParentFrameSettings}; use {WebProxyTokens, ParentFrameSettings};
@ -43,12 +43,12 @@ pub const UTILS_PATH: &'static str = "parity-utils";
pub const WEB_PATH: &'static str = "web"; pub const WEB_PATH: &'static str = "web";
pub const URL_REFERER: &'static str = "__referer="; pub const URL_REFERER: &'static str = "__referer=";
pub fn utils() -> Box<Endpoint> { pub fn utils(pool: CpuPool) -> Box<Endpoint> {
Box::new(PageEndpoint::with_prefix(parity_ui::App::default(), UTILS_PATH.to_owned())) Box::new(page::builtin::Dapp::new(pool, parity_ui::App::default()))
} }
pub fn ui() -> Box<Endpoint> { pub fn ui(pool: CpuPool) -> Box<Endpoint> {
Box::new(PageEndpoint::with_fallback_to_index(parity_ui::App::default())) Box::new(page::builtin::Dapp::with_fallback_to_index(pool, parity_ui::App::default()))
} }
pub fn ui_redirection(embeddable: Option<ParentFrameSettings>) -> Box<Endpoint> { pub fn ui_redirection(embeddable: Option<ParentFrameSettings>) -> Box<Endpoint> {
@ -61,14 +61,14 @@ pub fn all_endpoints<F: Fetch>(
dapps_domain: &str, dapps_domain: &str,
embeddable: Option<ParentFrameSettings>, embeddable: Option<ParentFrameSettings>,
web_proxy_tokens: Arc<WebProxyTokens>, web_proxy_tokens: Arc<WebProxyTokens>,
remote: Remote,
fetch: F, fetch: F,
pool: CpuPool,
) -> (Vec<String>, Endpoints) { ) -> (Vec<String>, Endpoints) {
// fetch fs dapps at first to avoid overwriting builtins // fetch fs dapps at first to avoid overwriting builtins
let mut pages = fs::local_endpoints(dapps_path.clone(), embeddable.clone()); let mut pages = fs::local_endpoints(dapps_path.clone(), embeddable.clone(), pool.clone());
let local_endpoints: Vec<String> = pages.keys().cloned().collect(); let local_endpoints: Vec<String> = pages.keys().cloned().collect();
for path in extra_dapps { for path in extra_dapps {
if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), embeddable.clone()) { if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), embeddable.clone(), pool.clone()) {
pages.insert(id, endpoint); pages.insert(id, endpoint);
} else { } else {
warn!(target: "dapps", "Ignoring invalid dapp at {}", path.display()); warn!(target: "dapps", "Ignoring invalid dapp at {}", path.display());
@ -76,17 +76,17 @@ pub fn all_endpoints<F: Fetch>(
} }
// NOTE [ToDr] Dapps will be currently embeded on 8180 // NOTE [ToDr] Dapps will be currently embeded on 8180
insert::<parity_ui::App>(&mut pages, "ui", Embeddable::Yes(embeddable.clone())); insert::<parity_ui::App>(&mut pages, "ui", Embeddable::Yes(embeddable.clone()), pool.clone());
pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned())); pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned()));
pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), remote.clone(), fetch.clone())); pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), fetch.clone()));
(local_endpoints, pages) (local_endpoints, pages)
} }
fn insert<T : WebApp + Default + 'static>(pages: &mut Endpoints, id: &str, embed_at: Embeddable) { fn insert<T : WebApp + Default + 'static>(pages: &mut Endpoints, id: &str, embed_at: Embeddable, pool: CpuPool) {
pages.insert(id.to_owned(), Box::new(match embed_at { pages.insert(id.to_owned(), Box::new(match embed_at {
Embeddable::Yes(address) => PageEndpoint::new_safe_to_embed(T::default(), address), Embeddable::Yes(address) => page::builtin::Dapp::new_safe_to_embed(pool, T::default(), address),
Embeddable::No => PageEndpoint::new(T::default()), Embeddable::No => page::builtin::Dapp::new(pool, T::default()),
})); }));
} }

View File

@ -16,9 +16,10 @@
//! UI redirections //! UI redirections
use hyper::{Control, StatusCode}; use hyper::StatusCode;
use futures::future;
use endpoint::{Endpoint, Handler, EndpointPath}; use endpoint::{Endpoint, Request, Response, EndpointPath};
use {handlers, Embeddable}; use {handlers, Embeddable};
/// Redirection to UI server. /// Redirection to UI server.
@ -37,19 +38,20 @@ impl Redirection {
} }
impl Endpoint for Redirection { impl Endpoint for Redirection {
fn to_async_handler(&self, _path: EndpointPath, _control: Control) -> Box<Handler> { fn respond(&self, _path: EndpointPath, req: Request) -> Response {
if let Some(ref frame) = self.embeddable_on { Box::new(future::ok(if let Some(ref frame) = self.embeddable_on {
trace!(target: "dapps", "Redirecting to signer interface."); trace!(target: "dapps", "Redirecting to signer interface.");
handlers::Redirection::boxed(&format!("http://{}:{}", &frame.host, frame.port)) let protocol = req.uri().scheme().unwrap_or("http");
handlers::Redirection::new(format!("{}://{}:{}", protocol, &frame.host, frame.port)).into()
} else { } else {
trace!(target: "dapps", "Signer disabled, returning 404."); trace!(target: "dapps", "Signer disabled, returning 404.");
Box::new(handlers::ContentHandler::error( handlers::ContentHandler::error(
StatusCode::NotFound, StatusCode::NotFound,
"404 Not Found", "404 Not Found",
"Your homepage is not available when Trusted Signer is disabled.", "Your homepage is not available when Trusted Signer is disabled.",
Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."),
None, None,
)) ).into()
} }))
} }
} }

View File

@ -18,17 +18,25 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use hyper::{self, server, net}; use jsonrpc_core::BoxFuture;
use hyper;
#[derive(Debug, PartialEq, Default, Clone)] #[derive(Debug, PartialEq, Default, Clone)]
pub struct EndpointPath { pub struct EndpointPath {
pub app_id: String, pub app_id: String,
pub app_params: Vec<String>, pub app_params: Vec<String>,
pub query: Option<String>,
pub host: String, pub host: String,
pub port: u16, pub port: u16,
pub using_dapps_domains: bool, pub using_dapps_domains: bool,
} }
impl EndpointPath {
pub fn has_no_params(&self) -> bool {
self.app_params.is_empty() || self.app_params.iter().all(|x| x.is_empty())
}
}
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct EndpointInfo { pub struct EndpointInfo {
pub name: String, pub name: String,
@ -39,16 +47,11 @@ pub struct EndpointInfo {
} }
pub type Endpoints = BTreeMap<String, Box<Endpoint>>; pub type Endpoints = BTreeMap<String, Box<Endpoint>>;
pub type Handler = server::Handler<net::HttpStream> + Send; pub type Response = BoxFuture<hyper::Response, hyper::Error>;
pub type Request = hyper::Request;
pub trait Endpoint : Send + Sync { pub trait Endpoint : Send + Sync {
fn info(&self) -> Option<&EndpointInfo> { None } fn info(&self) -> Option<&EndpointInfo> { None }
fn to_handler(&self, _path: EndpointPath) -> Box<Handler> { fn respond(&self, path: EndpointPath, req: Request) -> Response;
panic!("This Endpoint is asynchronous and requires Control object.");
}
fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box<Handler> {
self.to_handler(path)
}
} }

View File

@ -1,112 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Async Content Handler
//! Temporary solution until we switch to future-based server.
//! Wraps a future and converts it to hyper::server::Handler;
use std::{mem, time};
use std::sync::mpsc;
use futures::Future;
use hyper::{server, Decoder, Encoder, Next, Control};
use hyper::net::HttpStream;
use handlers::ContentHandler;
use parity_reactor::Remote;
const TIMEOUT_SECS: u64 = 15;
enum State<F, T, M> {
Initial(F, M, Remote, Control),
Waiting(mpsc::Receiver<Result<T, ()>>, M),
Done(ContentHandler),
Invalid,
}
pub struct AsyncHandler<F, T, M> {
state: State<F, T, M>,
}
impl<F, T, M> AsyncHandler<F, T, M> {
pub fn new(future: F, map: M, remote: Remote, control: Control) -> Self {
AsyncHandler {
state: State::Initial(future, map, remote, control),
}
}
}
impl<F, T, E, M> server::Handler<HttpStream> for AsyncHandler<F, Result<T, E>, M> where
F: Future<Item=T, Error=E> + Send + 'static,
M: FnOnce(Result<Result<T, E>, ()>) -> ContentHandler,
T: Send + 'static,
E: Send + 'static,
{
fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
if let State::Initial(future, map, remote, control) = mem::replace(&mut self.state, State::Invalid) {
let (tx, rx) = mpsc::sync_channel(1);
let control2 = control.clone();
let tx2 = tx.clone();
remote.spawn_with_timeout(move || future.then(move |result| {
// Send a result (ignore errors if the connection was dropped)
let _ = tx.send(Ok(result));
// Resume handler
let _ = control.ready(Next::read());
Ok(())
}), time::Duration::from_secs(TIMEOUT_SECS), move || {
// Notify about error
let _ = tx2.send(Err(()));
// Resume handler
let _ = control2.ready(Next::read());
});
self.state = State::Waiting(rx, map);
}
Next::wait()
}
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
if let State::Waiting(rx, map) = mem::replace(&mut self.state, State::Invalid) {
match rx.try_recv() {
Ok(result) => {
self.state = State::Done(map(result));
},
Err(err) => {
warn!("Resuming handler in incorrect state: {:?}", err);
}
}
}
Next::write()
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
if let State::Done(ref mut handler) = self.state {
handler.on_response(res)
} else {
Next::end()
}
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
if let State::Done(ref mut handler) = self.state {
handler.on_response_writable(encoder)
} else {
Next::end()
}
}
}

View File

@ -16,32 +16,29 @@
//! Simple Content Handler //! Simple Content Handler
use hyper::{header, server, Decoder, Encoder, Next}; use hyper::{self, mime, header};
use hyper::net::HttpStream; use hyper::StatusCode;
use hyper::mime::Mime;
use hyper::status::StatusCode;
use util::version; use util::version;
use handlers::add_security_headers; use handlers::add_security_headers;
use Embeddable; use Embeddable;
#[derive(Clone)] #[derive(Debug, Clone)]
pub struct ContentHandler { pub struct ContentHandler {
code: StatusCode, code: StatusCode,
content: String, content: String,
mimetype: Mime, mimetype: mime::Mime,
write_pos: usize,
safe_to_embed_on: Embeddable, safe_to_embed_on: Embeddable,
} }
impl ContentHandler { impl ContentHandler {
pub fn ok(content: String, mimetype: Mime) -> Self { pub fn ok(content: String, mimetype: mime::Mime) -> Self {
Self::new(StatusCode::Ok, content, mimetype) Self::new(StatusCode::Ok, content, mimetype)
} }
pub fn html(code: StatusCode, content: String, embeddable_on: Embeddable) -> Self { pub fn html(code: StatusCode, content: String, embeddable_on: Embeddable) -> Self {
Self::new_embeddable(code, content, mime!(Text/Html), embeddable_on) Self::new_embeddable(code, content, mime::TEXT_HTML, embeddable_on)
} }
pub fn error( pub fn error(
@ -60,57 +57,32 @@ impl ContentHandler {
), embeddable_on) ), embeddable_on)
} }
pub fn new(code: StatusCode, content: String, mimetype: Mime) -> Self { pub fn new(code: StatusCode, content: String, mimetype: mime::Mime) -> Self {
Self::new_embeddable(code, content, mimetype, None) Self::new_embeddable(code, content, mimetype, None)
} }
pub fn new_embeddable( pub fn new_embeddable(
code: StatusCode, code: StatusCode,
content: String, content: String,
mimetype: Mime, mimetype: mime::Mime,
safe_to_embed_on: Embeddable, safe_to_embed_on: Embeddable,
) -> Self { ) -> Self {
ContentHandler { ContentHandler {
code, code,
content, content,
mimetype, mimetype,
write_pos: 0,
safe_to_embed_on, safe_to_embed_on,
} }
} }
} }
impl server::Handler<HttpStream> for ContentHandler { impl Into<hyper::Response> for ContentHandler {
fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next { fn into(self) -> hyper::Response {
Next::write() let mut res = hyper::Response::new()
} .with_status(self.code)
.with_header(header::ContentType(self.mimetype))
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next { .with_body(self.content);
Next::write() add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on);
} res
fn on_response(&mut self, res: &mut server::Response) -> Next {
res.set_status(self.code);
res.headers_mut().set(header::ContentType(self.mimetype.clone()));
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.take());
Next::write()
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
let bytes = self.content.as_bytes();
if self.write_pos == bytes.len() {
return Next::end();
}
match encoder.write(&bytes[self.write_pos..]) {
Ok(bytes) => {
self.write_pos += bytes;
Next::write()
},
Err(e) => match e.kind() {
::std::io::ErrorKind::WouldBlock => Next::write(),
_ => Next::end()
},
}
} }
} }

View File

@ -16,45 +16,31 @@
//! Echo Handler //! Echo Handler
use std::io::Read; use hyper::{self, header};
use hyper::{server, Decoder, Encoder, Next};
use hyper::net::HttpStream;
use super::ContentHandler;
#[derive(Default)] use handlers::add_security_headers;
#[derive(Debug)]
pub struct EchoHandler { pub struct EchoHandler {
content: String, request: hyper::Request,
handler: Option<ContentHandler>,
} }
impl server::Handler<HttpStream> for EchoHandler { impl EchoHandler {
fn on_request(&mut self, _: server::Request<HttpStream>) -> Next { pub fn new(request: hyper::Request) -> Self {
Next::read() EchoHandler {
} request,
fn on_request_readable(&mut self, decoder: &mut Decoder<HttpStream>) -> Next {
match decoder.read_to_string(&mut self.content) {
Ok(0) => {
self.handler = Some(ContentHandler::ok(self.content.clone(), mime!(Application/Json)));
Next::write()
},
Ok(_) => Next::read(),
Err(e) => match e.kind() {
::std::io::ErrorKind::WouldBlock => Next::read(),
_ => Next::end(),
}
} }
} }
}
fn on_response(&mut self, res: &mut server::Response) -> Next { impl Into<hyper::Response> for EchoHandler {
self.handler.as_mut() fn into(self) -> hyper::Response {
.expect("handler always set in on_request, which is before now; qed") let content_type = self.request.headers().get().cloned();
.on_response(res) let mut res = hyper::Response::new()
} .with_header(content_type.unwrap_or(header::ContentType::json()))
.with_body(self.request.body());
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next { add_security_headers(res.headers_mut(), None);
self.handler.as_mut() res
.expect("handler always set in on_request, which is before now; qed")
.on_response_writable(encoder)
} }
} }

View File

@ -16,57 +16,39 @@
//! Hyper Server Handler that fetches a file during a request (proxy). //! Hyper Server Handler that fetches a file during a request (proxy).
use std::fmt; use std::{fmt, mem};
use std::sync::{mpsc, Arc}; use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Instant, Duration}; use std::time::{Instant, Duration};
use fetch::{self, Fetch}; use fetch::{self, Fetch};
use futures::Future; use futures::sync::oneshot;
use parity_reactor::Remote; use futures::{self, Future};
use hyper::{self, Method, StatusCode};
use jsonrpc_core::BoxFuture;
use parking_lot::Mutex; use parking_lot::Mutex;
use hyper::{server, Decoder, Encoder, Next, Method, Control}; use endpoint::{self, EndpointPath};
use hyper::net::HttpStream;
use hyper::uri::RequestUri;
use hyper::status::StatusCode;
use endpoint::EndpointPath;
use handlers::{ContentHandler, StreamingHandler}; use handlers::{ContentHandler, StreamingHandler};
use page::{LocalPageEndpoint, PageHandlerWaiting}; use page::local;
use {Embeddable}; use {Embeddable};
const FETCH_TIMEOUT: u64 = 300; const FETCH_TIMEOUT: u64 = 300;
pub enum ValidatorResponse { pub enum ValidatorResponse {
Local(LocalPageEndpoint), Local(local::Dapp),
Streaming(StreamingHandler<fetch::Response>), Streaming(StreamingHandler<fetch::Response>),
} }
pub trait ContentValidator: Send + 'static { pub trait ContentValidator: Sized + Send + 'static {
type Error: fmt::Debug + fmt::Display; type Error: fmt::Debug + fmt::Display;
fn validate_and_install(&self, fetch::Response) -> Result<ValidatorResponse, Self::Error>; fn validate_and_install(self, fetch::Response) -> Result<ValidatorResponse, Self::Error>;
} }
enum FetchState { #[derive(Debug, Clone)]
Waiting,
NotStarted(String),
Error(ContentHandler),
InProgress(mpsc::Receiver<FetchState>),
Streaming(StreamingHandler<fetch::Response>),
Done(LocalPageEndpoint, Box<PageHandlerWaiting>),
}
enum WaitResult {
Error(ContentHandler),
Done(LocalPageEndpoint),
NonAwaitable,
}
#[derive(Clone)]
pub struct FetchControl { pub struct FetchControl {
abort: Arc<AtomicBool>, abort: Arc<AtomicBool>,
listeners: Arc<Mutex<Vec<(Control, mpsc::Sender<WaitResult>)>>>, listeners: Arc<Mutex<Vec<oneshot::Sender<WaitResult>>>>,
deadline: Instant, deadline: Instant,
} }
@ -81,14 +63,30 @@ impl Default for FetchControl {
} }
impl FetchControl { impl FetchControl {
pub fn is_deadline_reached(&self) -> bool {
self.deadline < Instant::now()
}
pub fn abort(&self) {
self.abort.store(true, Ordering::SeqCst);
}
pub fn to_response(&self, path: EndpointPath) -> endpoint::Response {
let (tx, receiver) = oneshot::channel();
self.listeners.lock().push(tx);
Box::new(WaitingHandler {
path,
state: WaitState::Waiting(receiver),
})
}
fn notify<F: Fn() -> WaitResult>(&self, status: F) { fn notify<F: Fn() -> WaitResult>(&self, status: F) {
let mut listeners = self.listeners.lock(); let mut listeners = self.listeners.lock();
for (control, sender) in listeners.drain(..) { for sender in listeners.drain(..) {
trace!(target: "dapps", "Resuming request waiting for content..."); trace!(target: "dapps", "Resuming request waiting for content...");
if let Err(e) = sender.send(status()) { if let Err(_) = sender.send(status()) {
trace!(target: "dapps", "Waiting listener notification failed: {:?}", e); trace!(target: "dapps", "Waiting listener notification failed.");
} else {
let _ = control.ready(Next::read());
} }
} }
} }
@ -98,92 +96,79 @@ impl FetchControl {
FetchState::Error(ref handler) => self.notify(|| WaitResult::Error(handler.clone())), FetchState::Error(ref handler) => self.notify(|| WaitResult::Error(handler.clone())),
FetchState::Done(ref endpoint, _) => self.notify(|| WaitResult::Done(endpoint.clone())), FetchState::Done(ref endpoint, _) => self.notify(|| WaitResult::Done(endpoint.clone())),
FetchState::Streaming(_) => self.notify(|| WaitResult::NonAwaitable), FetchState::Streaming(_) => self.notify(|| WaitResult::NonAwaitable),
FetchState::NotStarted(_) | FetchState::InProgress(_) | FetchState::Waiting => {}, FetchState::InProgress(_) => {},
FetchState::Empty => {},
} }
} }
}
pub fn is_deadline_reached(&self) -> bool {
self.deadline < Instant::now()
}
pub fn abort(&self) { enum WaitState {
self.abort.store(true, Ordering::SeqCst); Waiting(oneshot::Receiver<WaitResult>),
} Done(endpoint::Response),
}
pub fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box<server::Handler<HttpStream> + Send> { #[derive(Debug)]
let (tx, rx) = mpsc::channel(); enum WaitResult {
self.listeners.lock().push((control, tx)); Error(ContentHandler),
Done(local::Dapp),
Box::new(WaitingHandler { NonAwaitable,
receiver: rx,
state: FetchState::Waiting,
uri: RequestUri::default(),
path: path,
})
}
} }
pub struct WaitingHandler { pub struct WaitingHandler {
receiver: mpsc::Receiver<WaitResult>,
state: FetchState,
uri: RequestUri,
path: EndpointPath, path: EndpointPath,
state: WaitState,
} }
impl server::Handler<HttpStream> for WaitingHandler { impl Future for WaitingHandler {
fn on_request(&mut self, request: server::Request<HttpStream>) -> Next { type Item = hyper::Response;
self.uri = request.uri().clone(); type Error = hyper::Error;
Next::wait()
}
fn on_request_readable(&mut self, decoder: &mut Decoder<HttpStream>) -> Next { fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
let result = self.receiver.try_recv().ok(); loop {
self.state = match result { let new_state = match self.state {
Some(WaitResult::Error(handler)) => FetchState::Error(handler), WaitState::Waiting(ref mut receiver) => {
Some(WaitResult::Done(endpoint)) => { let result = try_ready!(receiver.poll().map_err(|_| hyper::Error::Timeout));
let mut page_handler = endpoint.to_page_handler(self.path.clone());
page_handler.set_uri(&self.uri);
FetchState::Done(endpoint, page_handler)
},
_ => {
warn!("A result for waiting request was not received.");
FetchState::Waiting
},
};
match self.state { match result {
FetchState::Done(_, ref mut handler) => handler.on_request_readable(decoder), WaitResult::Error(handler) => {
FetchState::Streaming(ref mut handler) => handler.on_request_readable(decoder), return Ok(futures::Async::Ready(handler.into()));
FetchState::Error(ref mut handler) => handler.on_request_readable(decoder), },
_ => Next::write(), WaitResult::NonAwaitable => {
} let errors = Errors { embeddable_on: None };
} return Ok(futures::Async::Ready(errors.streaming().into()));
},
WaitResult::Done(endpoint) => {
WaitState::Done(endpoint.to_response(&self.path).into())
},
}
},
WaitState::Done(ref mut response) => {
return response.poll()
},
};
fn on_response(&mut self, res: &mut server::Response) -> Next { self.state = new_state;
match self.state {
FetchState::Done(_, ref mut handler) => handler.on_response(res),
FetchState::Streaming(ref mut handler) => handler.on_response(res),
FetchState::Error(ref mut handler) => handler.on_response(res),
_ => Next::end(),
}
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
match self.state {
FetchState::Done(_, ref mut handler) => handler.on_response_writable(encoder),
FetchState::Streaming(ref mut handler) => handler.on_response_writable(encoder),
FetchState::Error(ref mut handler) => handler.on_response_writable(encoder),
_ => Next::end(),
} }
} }
} }
#[derive(Clone)] #[derive(Debug, Clone)]
struct Errors { struct Errors {
embeddable_on: Embeddable, embeddable_on: Embeddable,
} }
impl Errors { impl Errors {
fn streaming(&self) -> ContentHandler {
ContentHandler::error(
StatusCode::BadGateway,
"Streaming Error",
"This content is being streamed in other place.",
None,
self.embeddable_on.clone(),
)
}
fn download_error<E: fmt::Debug>(&self, e: E) -> ContentHandler { fn download_error<E: fmt::Debug>(&self, e: E) -> ContentHandler {
ContentHandler::error( ContentHandler::error(
StatusCode::BadGateway, StatusCode::BadGateway,
@ -225,67 +210,102 @@ impl Errors {
} }
} }
pub struct ContentFetcherHandler<H: ContentValidator, F: Fetch> { enum FetchState {
Error(ContentHandler),
InProgress(BoxFuture<FetchState, ()>),
Streaming(hyper::Response),
Done(local::Dapp, endpoint::Response),
Empty,
}
impl fmt::Debug for FetchState {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::FetchState::*;
write!(fmt, "FetchState(")?;
match *self {
Error(ref error) => write!(fmt, "error: {:?}", error),
InProgress(_) => write!(fmt, "in progress"),
Streaming(ref res) => write!(fmt, "streaming: {:?}", res),
Done(ref endpoint, _) => write!(fmt, "done: {:?}", endpoint),
Empty => write!(fmt, "?"),
}?;
write!(fmt, ")")
}
}
#[derive(Debug)]
pub struct ContentFetcherHandler {
fetch_control: FetchControl, fetch_control: FetchControl,
control: Control,
remote: Remote,
status: FetchState, status: FetchState,
fetch: F,
installer: Option<H>,
path: EndpointPath,
errors: Errors, errors: Errors,
} }
impl<H: ContentValidator, F: Fetch> ContentFetcherHandler<H, F> { impl ContentFetcherHandler {
pub fn new(
url: String,
path: EndpointPath,
control: Control,
installer: H,
embeddable_on: Embeddable,
remote: Remote,
fetch: F,
) -> Self {
ContentFetcherHandler {
fetch_control: FetchControl::default(),
control,
remote,
fetch,
status: FetchState::NotStarted(url),
installer: Some(installer),
path,
errors: Errors {
embeddable_on,
},
}
}
pub fn fetch_control(&self) -> FetchControl { pub fn fetch_control(&self) -> FetchControl {
self.fetch_control.clone() self.fetch_control.clone()
} }
fn fetch_content(&self, uri: RequestUri, url: &str, installer: H) -> mpsc::Receiver<FetchState> { pub fn new<H: ContentValidator, F: Fetch>(
let (tx, rx) = mpsc::channel(); method: &hyper::Method,
let abort = self.fetch_control.abort.clone(); url: &str,
path: EndpointPath,
installer: H,
embeddable_on: Embeddable,
fetch: F,
) -> Self {
let fetch_control = FetchControl::default();
let errors = Errors { embeddable_on };
let path = self.path.clone(); // Validation of method
let tx2 = tx.clone(); let status = match *method {
let control = self.control.clone(); // Start fetching content
let errors = self.errors.clone(); Method::Get => {
trace!(target: "dapps", "Fetching content from: {:?}", url);
FetchState::InProgress(Self::fetch_content(
fetch,
url,
fetch_control.abort.clone(),
path,
errors.clone(),
installer,
))
},
// or return error
_ => FetchState::Error(errors.method_not_allowed()),
};
let future = self.fetch.fetch_with_abort(url, abort.into()).then(move |result| { ContentFetcherHandler {
fetch_control,
status,
errors,
}
}
fn fetch_content<H: ContentValidator, F: Fetch>(
fetch: F,
url: &str,
abort: Arc<AtomicBool>,
path: EndpointPath,
errors: Errors,
installer: H,
) -> BoxFuture<FetchState, ()> {
// Start fetching the content
let fetch2 = fetch.clone();
let future = fetch.fetch_with_abort(url, abort.into()).then(move |result| {
trace!(target: "dapps", "Fetching content finished. Starting validation: {:?}", result); trace!(target: "dapps", "Fetching content finished. Starting validation: {:?}", result);
let new_state = match result { Ok(match result {
Ok(response) => match installer.validate_and_install(response) { Ok(response) => match installer.validate_and_install(response) {
Ok(ValidatorResponse::Local(endpoint)) => { Ok(ValidatorResponse::Local(endpoint)) => {
trace!(target: "dapps", "Validation OK. Returning response."); trace!(target: "dapps", "Validation OK. Returning response.");
let mut handler = endpoint.to_page_handler(path); let response = endpoint.to_response(&path);
handler.set_uri(&uri); FetchState::Done(endpoint, response)
FetchState::Done(endpoint, handler)
}, },
Ok(ValidatorResponse::Streaming(handler)) => { Ok(ValidatorResponse::Streaming(stream)) => {
trace!(target: "dapps", "Validation OK. Streaming response."); trace!(target: "dapps", "Validation OK. Streaming response.");
FetchState::Streaming(handler) let (reading, response) = stream.into_response();
fetch2.process_and_forget(reading);
FetchState::Streaming(response)
}, },
Err(e) => { Err(e) => {
trace!(target: "dapps", "Error while validating content: {:?}", e); trace!(target: "dapps", "Error while validating content: {:?}", e);
@ -296,100 +316,55 @@ impl<H: ContentValidator, F: Fetch> ContentFetcherHandler<H, F> {
warn!(target: "dapps", "Unable to fetch content: {:?}", e); warn!(target: "dapps", "Unable to fetch content: {:?}", e);
FetchState::Error(errors.download_error(e)) FetchState::Error(errors.download_error(e))
}, },
}; })
// Content may be resolved when the connection is already dropped.
let _ = tx2.send(new_state);
// Ignoring control errors
let _ = control.ready(Next::read());
Ok(()) as Result<(), ()>
}); });
// make sure to run within fetch thread pool. // make sure to run within fetch thread pool.
let future = self.fetch.process(future); fetch.process(future)
// spawn to event loop
let control = self.control.clone();
let errors = self.errors.clone();
self.remote.spawn_with_timeout(|| future, Duration::from_secs(FETCH_TIMEOUT), move || {
// Notify about the timeout
let _ = tx.send(FetchState::Error(errors.timeout_error()));
// Ignoring control errors
let _ = control.ready(Next::read());
});
rx
} }
} }
impl<H: ContentValidator, F: Fetch> server::Handler<HttpStream> for ContentFetcherHandler<H, F> { impl Future for ContentFetcherHandler {
fn on_request(&mut self, request: server::Request<HttpStream>) -> Next { type Item = hyper::Response;
let status = if let FetchState::NotStarted(ref url) = self.status { type Error = hyper::Error;
let uri = request.uri().clone();
let installer = self.installer.take().expect("Installer always set initialy; installer used only in on_request; on_request invoked only once; qed");
Some(match *request.method() { fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
// Start fetching content loop {
Method::Get => { trace!(target: "dapps", "Polling status: {:?}", self.status);
trace!(target: "dapps", "Fetching content from: {:?}", url); self.status = match mem::replace(&mut self.status, FetchState::Empty) {
let receiver = self.fetch_content(uri, url, installer); FetchState::Error(error) => {
FetchState::InProgress(receiver) return Ok(futures::Async::Ready(error.into()));
}, },
// or return error FetchState::Streaming(response) => {
_ => FetchState::Error(self.errors.method_not_allowed()), return Ok(futures::Async::Ready(response));
}) },
} else { None }; any => any,
};
if let Some(status) = status { let status = match self.status {
// Request may time out
FetchState::InProgress(_) if self.fetch_control.is_deadline_reached() => {
trace!(target: "dapps", "Fetching dapp failed because of timeout.");
FetchState::Error(self.errors.timeout_error())
},
FetchState::InProgress(ref mut receiver) => {
// Check if there is a response
trace!(target: "dapps", "Polling streaming response.");
try_ready!(receiver.poll().map_err(|err| {
warn!(target: "dapps", "Error while fetching response: {:?}", err);
hyper::Error::Timeout
}))
},
FetchState::Done(_, ref mut response) => {
return response.poll()
},
FetchState::Empty => panic!("Future polled twice."),
_ => unreachable!(),
};
trace!(target: "dapps", "New status: {:?}", status);
self.fetch_control.set_status(&status); self.fetch_control.set_status(&status);
self.status = status; self.status = status;
} }
Next::read()
}
fn on_request_readable(&mut self, decoder: &mut Decoder<HttpStream>) -> Next {
let (status, next) = match self.status {
// Request may time out
FetchState::InProgress(_) if self.fetch_control.is_deadline_reached() => {
trace!(target: "dapps", "Fetching dapp failed because of timeout.");
(Some(FetchState::Error(self.errors.timeout_error())), Next::write())
},
FetchState::InProgress(ref receiver) => {
// Check if there is an answer
let rec = receiver.try_recv();
match rec {
// just return the new state
Ok(state) => (Some(state), Next::write()),
// wait some more
_ => (None, Next::wait())
}
},
FetchState::Error(ref mut handler) => (None, handler.on_request_readable(decoder)),
_ => (None, Next::write()),
};
if let Some(status) = status {
self.fetch_control.set_status(&status);
self.status = status;
}
next
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
match self.status {
FetchState::Done(_, ref mut handler) => handler.on_response(res),
FetchState::Streaming(ref mut handler) => handler.on_response(res),
FetchState::Error(ref mut handler) => handler.on_response(res),
_ => Next::end(),
}
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
match self.status {
FetchState::Done(_, ref mut handler) => handler.on_response_writable(encoder),
FetchState::Streaming(ref mut handler) => handler.on_response_writable(encoder),
FetchState::Error(ref mut handler) => handler.on_response_writable(encoder),
_ => Next::end(),
}
} }
} }

View File

@ -16,80 +16,79 @@
//! Hyper handlers implementations. //! Hyper handlers implementations.
mod async;
mod content; mod content;
mod echo; mod echo;
mod fetch; mod fetch;
mod reader;
mod redirect; mod redirect;
mod streaming; mod streaming;
pub use self::async::AsyncHandler;
pub use self::content::ContentHandler; pub use self::content::ContentHandler;
pub use self::echo::EchoHandler; pub use self::echo::EchoHandler;
pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse}; pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse};
pub use self::reader::Reader;
pub use self::redirect::Redirection; pub use self::redirect::Redirection;
pub use self::streaming::StreamingHandler; pub use self::streaming::StreamingHandler;
use std::iter; use std::iter;
use itertools::Itertools; use itertools::Itertools;
use url::Url; use hyper::header;
use hyper::{server, header, net, uri};
use {apps, address, Embeddable}; use {apps, address, Embeddable};
/// Adds security-related headers to the Response. /// Adds security-related headers to the Response.
pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embeddable) { pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embeddable) {
headers.set_raw("X-XSS-Protection", vec![b"1; mode=block".to_vec()]); headers.set_raw("X-XSS-Protection", "1; mode=block");
headers.set_raw("X-Content-Type-Options", vec![b"nosniff".to_vec()]); headers.set_raw("X-Content-Type-Options", "nosniff");
// Embedding header: // Embedding header:
if let None = embeddable_on { if let None = embeddable_on {
headers.set_raw("X-Frame-Options", vec![b"SAMEORIGIN".to_vec()]); headers.set_raw("X-Frame-Options", "SAMEORIGIN");
} }
// Content Security Policy headers // Content Security Policy headers
headers.set_raw("Content-Security-Policy", vec![ headers.set_raw("Content-Security-Policy", String::new()
// Allow connecting to WS servers and HTTP(S) servers. // Allow connecting to WS servers and HTTP(S) servers.
// We could be more restrictive and allow only RPC server URL. // We could be more restrictive and allow only RPC server URL.
b"connect-src http: https: ws: wss:;".to_vec(), + "connect-src http: https: ws: wss:;"
// Allow framing any content from HTTP(S). // Allow framing any content from HTTP(S).
// Again we could only allow embedding from RPC server URL. // Again we could only allow embedding from RPC server URL.
// (deprecated) // (deprecated)
b"frame-src 'self' http: https:;".to_vec(), + "frame-src 'self' http: https:;"
// Allow framing and web workers from HTTP(S). // Allow framing and web workers from HTTP(S).
b"child-src 'self' http: https:;".to_vec(), + "child-src 'self' http: https:;"
// We allow data: blob: and HTTP(s) images. // We allow data: blob: and HTTP(s) images.
// We could get rid of wildcarding HTTP and only allow RPC server URL. // We could get rid of wildcarding HTTP and only allow RPC server URL.
// (http required for local dapps icons) // (http required for local dapps icons)
b"img-src 'self' 'unsafe-inline' data: blob: http: https:;".to_vec(), + "img-src 'self' 'unsafe-inline' data: blob: http: https:;"
// Allow style from data: blob: and HTTPS. // Allow style from data: blob: and HTTPS.
b"style-src 'self' 'unsafe-inline' data: blob: https:;".to_vec(), + "style-src 'self' 'unsafe-inline' data: blob: https:;"
// Allow fonts from data: and HTTPS. // Allow fonts from data: and HTTPS.
b"font-src 'self' data: https:;".to_vec(), + "font-src 'self' data: https:;"
// Allow inline scripts and scripts eval (webpack/jsconsole) // Allow inline scripts and scripts eval (webpack/jsconsole)
{ + {
let script_src = embeddable_on.as_ref() let script_src = embeddable_on.as_ref()
.map(|e| e.extra_script_src.iter() .map(|e| e.extra_script_src.iter()
.map(|&(ref host, port)| address(host, port)) .map(|&(ref host, port)| address(host, port))
.join(" ") .join(" ")
).unwrap_or_default(); ).unwrap_or_default();
format!( &format!(
"script-src 'self' 'unsafe-inline' 'unsafe-eval' {};", "script-src 'self' 'unsafe-inline' 'unsafe-eval' {};",
script_src script_src
).into_bytes() )
}, }
// Same restrictions as script-src with additional // Same restrictions as script-src with additional
// blob: that is required for camera access (worker) // blob: that is required for camera access (worker)
b"worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;".to_vec(), + "worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;"
// Restrict everything else to the same origin. // Restrict everything else to the same origin.
b"default-src 'self';".to_vec(), + "default-src 'self';"
// Run in sandbox mode (although it's not fully safe since we allow same-origin and script) // Run in sandbox mode (although it's not fully safe since we allow same-origin and script)
b"sandbox allow-same-origin allow-forms allow-modals allow-popups allow-presentation allow-scripts;".to_vec(), + "sandbox allow-same-origin allow-forms allow-modals allow-popups allow-presentation allow-scripts;"
// Disallow subitting forms from any dapps // Disallow subitting forms from any dapps
b"form-action 'none';".to_vec(), + "form-action 'none';"
// Never allow mixed content // Never allow mixed content
b"block-all-mixed-content;".to_vec(), + "block-all-mixed-content;"
// Specify if the site can be embedded. // Specify if the site can be embedded.
match embeddable_on { + &match embeddable_on {
Some(ref embed) => { Some(ref embed) => {
let std = address(&embed.host, embed.port); let std = address(&embed.host, embed.port);
let proxy = format!("{}.{}", apps::HOME_PAGE, embed.dapps_domain); let proxy = format!("{}.{}", apps::HOME_PAGE, embed.dapps_domain);
@ -112,43 +111,6 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
format!("frame-ancestors {};", ancestors) format!("frame-ancestors {};", ancestors)
}, },
None => format!("frame-ancestors 'self';"), None => format!("frame-ancestors 'self';"),
}.into_bytes(), }
]); );
}
/// Extracts URL part from the Request.
pub fn extract_url(req: &server::Request<net::HttpStream>) -> Option<Url> {
convert_uri_to_url(req.uri(), req.headers().get::<header::Host>())
}
/// Extracts URL given URI and Host header.
pub fn convert_uri_to_url(uri: &uri::RequestUri, host: Option<&header::Host>) -> Option<Url> {
match *uri {
uri::RequestUri::AbsoluteUri(ref url) => {
match Url::from_generic_url(url.clone()) {
Ok(url) => Some(url),
_ => None,
}
},
uri::RequestUri::AbsolutePath { ref path, ref query } => {
let query = match *query {
Some(ref query) => format!("?{}", query),
None => "".into(),
};
// Attempt to prepend the Host header (mandatory in HTTP/1.1)
let url_string = match host {
Some(ref host) => {
format!("http://{}:{}{}{}", host.hostname, host.port.unwrap_or(80), path, query)
},
None => return None,
};
match Url::parse(&url_string) {
Ok(url) => Some(url),
_ => None,
}
},
_ => None,
}
} }

View File

@ -0,0 +1,73 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! A chunk-producing io::Read wrapper.
use std::io::{self, Read};
use futures::{self, sink, Sink, Future};
use futures::sync::mpsc;
use hyper;
type Sender = mpsc::Sender<Result<hyper::Chunk, hyper::Error>>;
const MAX_CHUNK_SIZE: usize = 32 * 1024;
/// A Reader is essentially a stream of `hyper::Chunks`.
/// The chunks are read from given `io::Read` instance.
///
/// Unfortunately `hyper` doesn't allow you to pass `Stream`
/// directly to the response, so you need to create
/// a `Body::pair()` and send over chunks using `sink::Send`.
/// Also `Chunks` need to take `Vec` by value, so we need
/// to allocate it for each chunk being sent.
pub struct Reader<R: io::Read> {
buffer: [u8; MAX_CHUNK_SIZE],
content: io::BufReader<R>,
sending: sink::Send<Sender>,
}
impl<R: io::Read> Reader<R> {
pub fn pair(content: R, initial: Vec<u8>) -> (Self, hyper::Body) {
let (tx, rx) = hyper::Body::pair();
let reader = Reader {
buffer: [0; MAX_CHUNK_SIZE],
content: io::BufReader::new(content),
sending: tx.send(Ok(initial.into())),
};
(reader, rx)
}
}
impl<R: io::Read> Future for Reader<R> {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
let next = try_ready!(self.sending.poll().map_err(|err| {
warn!(target: "dapps", "Unable to send next chunk: {:?}", err);
}));
self.sending = match self.content.read(&mut self.buffer) {
Ok(0) => return Ok(futures::Async::Ready(())),
Ok(read) => next.send(Ok(self.buffer[..read].to_vec().into())),
Err(err) => next.send(Err(hyper::Error::Io(err))),
}
}
}
}

View File

@ -16,9 +16,7 @@
//! HTTP Redirection hyper handler //! HTTP Redirection hyper handler
use hyper::{header, server, Decoder, Encoder, Next}; use hyper::{self, header, StatusCode};
use hyper::net::HttpStream;
use hyper::status::StatusCode;
#[derive(Clone)] #[derive(Clone)]
pub struct Redirection { pub struct Redirection {
@ -26,36 +24,18 @@ pub struct Redirection {
} }
impl Redirection { impl Redirection {
pub fn new(url: &str) -> Self { pub fn new<T: Into<String>>(url: T) -> Self {
Redirection { Redirection {
to_url: url.to_owned() to_url: url.into()
} }
} }
pub fn boxed(url: &str) -> Box<Self> {
Box::new(Self::new(url))
}
} }
impl server::Handler<HttpStream> for Redirection { impl Into<hyper::Response> for Redirection {
fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next { fn into(self) -> hyper::Response {
Next::write()
}
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
Next::write()
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
// Don't use `MovedPermanently` here to prevent browser from caching the redirections. // Don't use `MovedPermanently` here to prevent browser from caching the redirections.
res.set_status(StatusCode::Found); hyper::Response::new()
res.headers_mut().set(header::Location(self.to_url.to_owned())); .with_status(StatusCode::Found)
Next::write() .with_header(header::Location::new(self.to_url))
}
fn on_response_writable(&mut self, _encoder: &mut Encoder<HttpStream>) -> Next {
Next::end()
} }
} }

View File

@ -16,87 +16,43 @@
//! Content Stream Response //! Content Stream Response
use std::io::{self, Read}; use std::io;
use hyper::{self, header, mime, StatusCode};
use hyper::{header, server, Decoder, Encoder, Next}; use handlers::{add_security_headers, Reader};
use hyper::net::HttpStream;
use hyper::mime::Mime;
use hyper::status::StatusCode;
use handlers::add_security_headers;
use Embeddable; use Embeddable;
const BUFFER_SIZE: usize = 1024; pub struct StreamingHandler<R> {
initial: Vec<u8>,
pub struct StreamingHandler<R: io::Read> { content: R,
buffer: [u8; BUFFER_SIZE],
buffer_leftover: usize,
status: StatusCode, status: StatusCode,
content: io::BufReader<R>, mimetype: mime::Mime,
mimetype: Mime,
safe_to_embed_on: Embeddable, safe_to_embed_on: Embeddable,
} }
impl<R: io::Read> StreamingHandler<R> { impl<R: io::Read> StreamingHandler<R> {
pub fn new(content: R, status: StatusCode, mimetype: Mime, embeddable_on: Embeddable) -> Self { pub fn new(content: R, status: StatusCode, mimetype: mime::Mime, safe_to_embed_on: Embeddable) -> Self {
StreamingHandler { StreamingHandler {
buffer: [0; BUFFER_SIZE], initial: Vec::new(),
buffer_leftover: 0, content,
status: status, status,
content: io::BufReader::new(content), mimetype,
mimetype: mimetype, safe_to_embed_on,
safe_to_embed_on: embeddable_on,
} }
} }
pub fn set_initial_content(&mut self, content: &str) { pub fn set_initial_content(&mut self, content: &str) {
assert_eq!(self.buffer_leftover, 0); self.initial = content.as_bytes().to_vec();
let bytes = content.as_bytes(); }
self.buffer_leftover = bytes.len();
self.buffer[0..self.buffer_leftover].copy_from_slice(bytes); pub fn into_response(self) -> (Reader<R>, hyper::Response) {
} let (reader, body) = Reader::pair(self.content, self.initial);
} let mut res = hyper::Response::new()
.with_status(self.status)
impl<R: io::Read> server::Handler<HttpStream> for StreamingHandler<R> { .with_header(header::ContentType(self.mimetype))
fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next { .with_body(body);
Next::write() add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on);
}
(reader, res)
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
Next::write()
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
res.set_status(self.status);
res.headers_mut().set(header::ContentType(self.mimetype.clone()));
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.take());
Next::write()
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
fn handle_error(e: io::Error) -> Next {
match e.kind() {
::std::io::ErrorKind::WouldBlock => Next::write(),
_ => Next::end(),
}
}
let write_pos = self.buffer_leftover;
match self.content.read(&mut self.buffer[write_pos..]) {
Err(e) => handle_error(e),
Ok(read) => match encoder.write(&self.buffer[..write_pos + read]) {
Err(e) => handle_error(e),
Ok(0) => Next::end(),
Ok(wrote) => {
self.buffer_leftover = write_pos + read - wrote;
if self.buffer_leftover > 0 {
for i in self.buffer_leftover..write_pos + read {
self.buffer.swap(i, i - self.buffer_leftover);
}
}
Next::write()
},
},
}
} }
} }

View File

@ -20,7 +20,7 @@
#![cfg_attr(feature="nightly", plugin(clippy))] #![cfg_attr(feature="nightly", plugin(clippy))]
extern crate base32; extern crate base32;
extern crate futures; extern crate futures_cpupool;
extern crate itertools; extern crate itertools;
extern crate linked_hash_map; extern crate linked_hash_map;
extern crate mime_guess; extern crate mime_guess;
@ -29,9 +29,7 @@ extern crate rand;
extern crate rustc_hex; extern crate rustc_hex;
extern crate serde; extern crate serde;
extern crate serde_json; extern crate serde_json;
extern crate time;
extern crate unicase; extern crate unicase;
extern crate url as url_lib;
extern crate zip; extern crate zip;
extern crate jsonrpc_core; extern crate jsonrpc_core;
@ -44,14 +42,13 @@ extern crate fetch;
extern crate node_health; extern crate node_health;
extern crate parity_dapps_glue as parity_dapps; extern crate parity_dapps_glue as parity_dapps;
extern crate parity_hash_fetch as hash_fetch; extern crate parity_hash_fetch as hash_fetch;
extern crate parity_reactor;
extern crate parity_ui; extern crate parity_ui;
extern crate hash; extern crate hash;
#[macro_use] #[macro_use]
extern crate log; extern crate futures;
#[macro_use] #[macro_use]
extern crate mime; extern crate log;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
@ -59,6 +56,8 @@ extern crate serde_derive;
extern crate ethcore_devtools as devtools; extern crate ethcore_devtools as devtools;
#[cfg(test)] #[cfg(test)]
extern crate env_logger; extern crate env_logger;
#[cfg(test)]
extern crate parity_reactor;
mod endpoint; mod endpoint;
mod apps; mod apps;
@ -67,7 +66,6 @@ mod router;
mod handlers; mod handlers;
mod api; mod api;
mod proxypac; mod proxypac;
mod url;
mod web; mod web;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
@ -76,13 +74,12 @@ use std::collections::HashMap;
use std::mem; use std::mem;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use parking_lot::RwLock; use futures_cpupool::CpuPool;
use jsonrpc_http_server::{self as http, hyper, Origin}; use jsonrpc_http_server::{self as http, hyper, Origin};
use parking_lot::RwLock;
use fetch::Fetch; use fetch::Fetch;
use node_health::NodeHealth; use node_health::NodeHealth;
use parity_reactor::Remote;
pub use hash_fetch::urlhint::ContractClient; pub use hash_fetch::urlhint::ContractClient;
pub use node_health::SyncStatus; pub use node_health::SyncStatus;
@ -105,6 +102,7 @@ pub struct Endpoints {
endpoints: Arc<RwLock<endpoint::Endpoints>>, endpoints: Arc<RwLock<endpoint::Endpoints>>,
dapps_path: PathBuf, dapps_path: PathBuf,
embeddable: Option<ParentFrameSettings>, embeddable: Option<ParentFrameSettings>,
pool: Option<CpuPool>,
} }
impl Endpoints { impl Endpoints {
@ -117,7 +115,11 @@ impl Endpoints {
/// Check for any changes in the local dapps folder and update. /// Check for any changes in the local dapps folder and update.
pub fn refresh_local_dapps(&self) { pub fn refresh_local_dapps(&self) {
let new_local = apps::fs::local_endpoints(&self.dapps_path, self.embeddable.clone()); let pool = match self.pool.as_ref() {
None => return,
Some(pool) => pool,
};
let new_local = apps::fs::local_endpoints(&self.dapps_path, self.embeddable.clone(), pool.clone());
let old_local = mem::replace(&mut *self.local_endpoints.write(), new_local.keys().cloned().collect()); let old_local = mem::replace(&mut *self.local_endpoints.write(), new_local.keys().cloned().collect());
let (_, to_remove): (_, Vec<_>) = old_local let (_, to_remove): (_, Vec<_>) = old_local
.into_iter() .into_iter()
@ -151,8 +153,8 @@ impl Middleware {
/// Creates new middleware for UI server. /// Creates new middleware for UI server.
pub fn ui<F: Fetch>( pub fn ui<F: Fetch>(
pool: CpuPool,
health: NodeHealth, health: NodeHealth,
remote: Remote,
dapps_domain: &str, dapps_domain: &str,
registrar: Arc<ContractClient>, registrar: Arc<ContractClient>,
sync_status: Arc<SyncStatus>, sync_status: Arc<SyncStatus>,
@ -161,16 +163,16 @@ impl Middleware {
let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(
hash_fetch::urlhint::URLHintContract::new(registrar), hash_fetch::urlhint::URLHintContract::new(registrar),
sync_status.clone(), sync_status.clone(),
remote.clone(),
fetch.clone(), fetch.clone(),
pool.clone(),
).embeddable_on(None).allow_dapps(false)); ).embeddable_on(None).allow_dapps(false));
let special = { let special = {
let mut special = special_endpoints( let mut special = special_endpoints(
pool.clone(),
health, health,
content_fetcher.clone(), content_fetcher.clone(),
remote.clone(),
); );
special.insert(router::SpecialEndpoint::Home, Some(apps::ui())); special.insert(router::SpecialEndpoint::Home, Some(apps::ui(pool.clone())));
special special
}; };
let router = router::Router::new( let router = router::Router::new(
@ -189,8 +191,8 @@ impl Middleware {
/// Creates new Dapps server middleware. /// Creates new Dapps server middleware.
pub fn dapps<F: Fetch>( pub fn dapps<F: Fetch>(
pool: CpuPool,
health: NodeHealth, health: NodeHealth,
remote: Remote,
ui_address: Option<(String, u16)>, ui_address: Option<(String, u16)>,
extra_embed_on: Vec<(String, u16)>, extra_embed_on: Vec<(String, u16)>,
extra_script_src: Vec<(String, u16)>, extra_script_src: Vec<(String, u16)>,
@ -206,8 +208,8 @@ impl Middleware {
let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(
hash_fetch::urlhint::URLHintContract::new(registrar), hash_fetch::urlhint::URLHintContract::new(registrar),
sync_status.clone(), sync_status.clone(),
remote.clone(),
fetch.clone(), fetch.clone(),
pool.clone(),
).embeddable_on(embeddable.clone()).allow_dapps(true)); ).embeddable_on(embeddable.clone()).allow_dapps(true));
let (local_endpoints, endpoints) = apps::all_endpoints( let (local_endpoints, endpoints) = apps::all_endpoints(
dapps_path.clone(), dapps_path.clone(),
@ -215,21 +217,22 @@ impl Middleware {
dapps_domain, dapps_domain,
embeddable.clone(), embeddable.clone(),
web_proxy_tokens, web_proxy_tokens,
remote.clone(),
fetch.clone(), fetch.clone(),
pool.clone(),
); );
let endpoints = Endpoints { let endpoints = Endpoints {
endpoints: Arc::new(RwLock::new(endpoints)), endpoints: Arc::new(RwLock::new(endpoints)),
dapps_path, dapps_path,
local_endpoints: Arc::new(RwLock::new(local_endpoints)), local_endpoints: Arc::new(RwLock::new(local_endpoints)),
embeddable: embeddable.clone(), embeddable: embeddable.clone(),
pool: Some(pool.clone()),
}; };
let special = { let special = {
let mut special = special_endpoints( let mut special = special_endpoints(
pool.clone(),
health, health,
content_fetcher.clone(), content_fetcher.clone(),
remote.clone(),
); );
special.insert( special.insert(
router::SpecialEndpoint::Home, router::SpecialEndpoint::Home,
@ -254,23 +257,22 @@ impl Middleware {
} }
impl http::RequestMiddleware for Middleware { impl http::RequestMiddleware for Middleware {
fn on_request(&self, req: &hyper::server::Request<hyper::net::HttpStream>, control: &hyper::Control) -> http::RequestMiddlewareAction { fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction {
self.router.on_request(req, control) self.router.on_request(req)
} }
} }
fn special_endpoints( fn special_endpoints(
pool: CpuPool,
health: NodeHealth, health: NodeHealth,
content_fetcher: Arc<apps::fetcher::Fetcher>, content_fetcher: Arc<apps::fetcher::Fetcher>,
remote: Remote,
) -> HashMap<router::SpecialEndpoint, Option<Box<endpoint::Endpoint>>> { ) -> HashMap<router::SpecialEndpoint, Option<Box<endpoint::Endpoint>>> {
let mut special = HashMap::new(); let mut special = HashMap::new();
special.insert(router::SpecialEndpoint::Rpc, None); special.insert(router::SpecialEndpoint::Rpc, None);
special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); special.insert(router::SpecialEndpoint::Utils, Some(apps::utils(pool)));
special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new( special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new(
content_fetcher, content_fetcher,
health, health,
remote,
))); )));
special special
} }

View File

@ -14,71 +14,62 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io;
use futures::future;
use futures_cpupool::CpuPool;
use hyper::mime::{self, Mime};
use itertools::Itertools;
use parity_dapps::{WebApp, Info};
use endpoint::{Endpoint, EndpointInfo, EndpointPath, Request, Response};
use page::{handler, PageCache}; use page::{handler, PageCache};
use std::sync::Arc;
use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler};
use parity_dapps::{WebApp, File, Info};
use Embeddable; use Embeddable;
pub struct PageEndpoint<T : WebApp + 'static> { pub struct Dapp<T: WebApp + 'static> {
/// futures cpu pool
pool: CpuPool,
/// Content of the files /// Content of the files
pub app: Arc<T>, app: T,
/// Prefix to strip from the path (when `None` deducted from `app_id`)
pub prefix: Option<String>,
/// Safe to be loaded in frame by other origin. (use wisely!) /// Safe to be loaded in frame by other origin. (use wisely!)
safe_to_embed_on: Embeddable, safe_to_embed_on: Embeddable,
info: EndpointInfo, info: EndpointInfo,
fallback_to_index_html: bool, fallback_to_index_html: bool,
} }
impl<T: WebApp + 'static> PageEndpoint<T> { impl<T: WebApp + 'static> Dapp<T> {
/// Creates new `PageEndpoint` for builtin (compile time) Dapp. /// Creates new `Dapp` for builtin (compile time) Dapp.
pub fn new(app: T) -> Self { pub fn new(pool: CpuPool, app: T) -> Self {
let info = app.info(); let info = app.info();
PageEndpoint { Dapp {
app: Arc::new(app), pool,
prefix: None, app,
safe_to_embed_on: None, safe_to_embed_on: None,
info: EndpointInfo::from(info), info: EndpointInfo::from(info),
fallback_to_index_html: false, fallback_to_index_html: false,
} }
} }
/// Creates a new `PageEndpoint` for builtin (compile time) Dapp. /// Creates a new `Dapp` for builtin (compile time) Dapp.
/// Instead of returning 404 this endpoint will always server index.html. /// Instead of returning 404 this endpoint will always server index.html.
pub fn with_fallback_to_index(app: T) -> Self { pub fn with_fallback_to_index(pool: CpuPool, app: T) -> Self {
let info = app.info(); let info = app.info();
PageEndpoint { Dapp {
app: Arc::new(app), pool,
prefix: None, app,
safe_to_embed_on: None, safe_to_embed_on: None,
info: EndpointInfo::from(info), info: EndpointInfo::from(info),
fallback_to_index_html: true, fallback_to_index_html: true,
} }
} }
/// Create new `PageEndpoint` and specify prefix that should be removed before looking for a file. /// Creates new `Dapp` which can be safely used in iframe
/// It's used only for special endpoints (i.e. `/parity-utils/`)
/// So `/parity-utils/inject.js` will be resolved to `/inject.js` is prefix is set.
pub fn with_prefix(app: T, prefix: String) -> Self {
let info = app.info();
PageEndpoint {
app: Arc::new(app),
prefix: Some(prefix),
safe_to_embed_on: None,
info: EndpointInfo::from(info),
fallback_to_index_html: false,
}
}
/// Creates new `PageEndpoint` which can be safely used in iframe
/// even from different origin. It might be dangerous (clickjacking). /// even from different origin. It might be dangerous (clickjacking).
/// Use wisely! /// Use wisely!
pub fn new_safe_to_embed(app: T, address: Embeddable) -> Self { pub fn new_safe_to_embed(pool: CpuPool, app: T, address: Embeddable) -> Self {
let info = app.info(); let info = app.info();
PageEndpoint { Dapp {
app: Arc::new(app), pool,
prefix: None, app,
safe_to_embed_on: address, safe_to_embed_on: address,
info: EndpointInfo::from(info), info: EndpointInfo::from(info),
fallback_to_index_html: false, fallback_to_index_html: false,
@ -86,21 +77,51 @@ impl<T: WebApp + 'static> PageEndpoint<T> {
} }
} }
impl<T: WebApp> Endpoint for PageEndpoint<T> { impl<T: WebApp> Endpoint for Dapp<T> {
fn info(&self) -> Option<&EndpointInfo> { fn info(&self) -> Option<&EndpointInfo> {
Some(&self.info) Some(&self.info)
} }
fn to_handler(&self, path: EndpointPath) -> Box<Handler> { fn respond(&self, path: EndpointPath, _req: Request) -> Response {
Box::new(handler::PageHandler { trace!(target: "dapps", "Builtin file path: {:?}", path);
app: BuiltinDapp::new(self.app.clone(), self.fallback_to_index_html), let file_path = if path.has_no_params() {
prefix: self.prefix.clone(), "index.html".to_owned()
path: path, } else {
file: handler::ServedFile::new(self.safe_to_embed_on.clone()), path.app_params.into_iter().filter(|x| !x.is_empty()).join("/")
};
trace!(target: "dapps", "Builtin file: {:?}", file_path);
let file = {
let file = |path| self.app.file(path).map(|file| {
let content_type = match file.content_type.parse() {
Ok(mime) => mime,
Err(_) => {
warn!(target: "dapps", "invalid MIME type: {}", file.content_type);
mime::TEXT_HTML
},
};
BuiltinFile {
content_type,
content: io::Cursor::new(file.content),
}
});
let res = file(&file_path);
if self.fallback_to_index_html {
res.or_else(|| file("index.html"))
} else {
res
}
};
let (reader, response) = handler::PageHandler {
file,
cache: PageCache::Disabled, cache: PageCache::Disabled,
safe_to_embed_on: self.safe_to_embed_on.clone(), safe_to_embed_on: self.safe_to_embed_on.clone(),
}) }.into_response();
self.pool.spawn(reader).forget();
Box::new(future::ok(response))
} }
} }
@ -116,66 +137,20 @@ impl From<Info> for EndpointInfo {
} }
} }
struct BuiltinDapp<T: WebApp + 'static> {
app: Arc<T>, struct BuiltinFile {
fallback_to_index_html: bool, content_type: Mime,
content: io::Cursor<&'static [u8]>,
} }
impl<T: WebApp + 'static> BuiltinDapp<T> { impl handler::DappFile for BuiltinFile {
fn new(app: Arc<T>, fallback_to_index_html: bool) -> Self { type Reader = io::Cursor<&'static [u8]>;
BuiltinDapp {
app: app, fn content_type(&self) -> &Mime {
fallback_to_index_html: fallback_to_index_html, &self.content_type
} }
}
} fn into_reader(self) -> Self::Reader {
self.content
impl<T: WebApp + 'static> handler::Dapp for BuiltinDapp<T> {
type DappFile = BuiltinDappFile<T>;
fn file(&self, path: &str) -> Option<Self::DappFile> {
let file = |path| self.app.file(path).map(|_| {
BuiltinDappFile {
app: self.app.clone(),
path: path.into(),
write_pos: 0,
}
});
let res = file(path);
if self.fallback_to_index_html {
res.or_else(|| file("index.html"))
} else {
res
}
}
}
struct BuiltinDappFile<T: WebApp + 'static> {
app: Arc<T>,
path: String,
write_pos: usize,
}
impl<T: WebApp + 'static> BuiltinDappFile<T> {
fn file(&self) -> &File {
self.app.file(&self.path).expect("Check is done when structure is created.")
}
}
impl<T: WebApp + 'static> handler::DappFile for BuiltinDappFile<T> {
fn content_type(&self) -> &str {
self.file().content_type
}
fn is_drained(&self) -> bool {
self.write_pos == self.file().content.len()
}
fn next_chunk(&mut self) -> &[u8] {
&self.file().content[self.write_pos..]
}
fn bytes_written(&mut self, bytes: usize) {
self.write_pos += bytes;
} }
} }

View File

@ -14,61 +14,25 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use time::{self, Duration}; use std::io;
use std::time::{Duration, SystemTime};
use hyper::{self, header, StatusCode};
use hyper::mime::Mime;
use hyper::header; use handlers::{Reader, ContentHandler, add_security_headers};
use hyper::server;
use hyper::uri::RequestUri;
use hyper::net::HttpStream;
use hyper::status::StatusCode;
use hyper::{Decoder, Encoder, Next};
use endpoint::EndpointPath;
use handlers::{ContentHandler, add_security_headers};
use {Embeddable}; use {Embeddable};
/// Represents a file that can be sent to client. /// Represents a file that can be sent to client.
/// Implementation should keep track of bytes already sent internally. /// Implementation should keep track of bytes already sent internally.
pub trait DappFile: Send { pub trait DappFile {
/// A reader type returned by this file.
type Reader: io::Read;
/// Returns a content-type of this file. /// Returns a content-type of this file.
fn content_type(&self) -> &str; fn content_type(&self) -> &Mime;
/// Checks if all bytes from that file were written. /// Convert this file into io::Read instance.
fn is_drained(&self) -> bool; fn into_reader(self) -> Self::Reader where Self: Sized;
/// Fetch next chunk to write to the client.
fn next_chunk(&mut self) -> &[u8];
/// How many files have been written to the client.
fn bytes_written(&mut self, bytes: usize);
}
/// Dapp as a (dynamic) set of files.
pub trait Dapp: Send + 'static {
/// File type
type DappFile: DappFile;
/// Returns file under given path.
fn file(&self, path: &str) -> Option<Self::DappFile>;
}
/// Currently served by `PageHandler` file
pub enum ServedFile<T: Dapp> {
/// File from dapp
File(T::DappFile),
/// Error (404)
Error(ContentHandler),
}
impl<T: Dapp> ServedFile<T> {
pub fn new(embeddable_on: Embeddable) -> Self {
ServedFile::Error(ContentHandler::error(
StatusCode::NotFound,
"404 Not Found",
"Requested dapp resource was not found.",
None,
embeddable_on,
))
}
} }
/// Defines what cache headers should be appended to returned resources. /// Defines what cache headers should be appended to returned resources.
@ -84,194 +48,55 @@ impl Default for PageCache {
} }
} }
/// A generic type for `PageHandler` allowing to set the URL.
/// Used by dapps fetching to set the URL after the content was downloaded.
pub trait PageHandlerWaiting: server::Handler<HttpStream> + Send {
fn set_uri(&mut self, uri: &RequestUri);
}
/// A handler for a single webapp. /// A handler for a single webapp.
/// Resolves correct paths and serves as a plumbing code between /// Resolves correct paths and serves as a plumbing code between
/// hyper server and dapp. /// hyper server and dapp.
pub struct PageHandler<T: Dapp> { pub struct PageHandler<T: DappFile> {
/// A Dapp.
pub app: T,
/// File currently being served /// File currently being served
pub file: ServedFile<T>, pub file: Option<T>,
/// Optional prefix to strip from path.
pub prefix: Option<String>,
/// Requested path.
pub path: EndpointPath,
/// Flag indicating if the file can be safely embeded (put in iframe). /// Flag indicating if the file can be safely embeded (put in iframe).
pub safe_to_embed_on: Embeddable, pub safe_to_embed_on: Embeddable,
/// Cache settings for this page. /// Cache settings for this page.
pub cache: PageCache, pub cache: PageCache,
} }
impl<T: Dapp> PageHandlerWaiting for PageHandler<T> { impl<T: DappFile> PageHandler<T> {
fn set_uri(&mut self, uri: &RequestUri) { pub fn into_response(self) -> (Option<Reader<T::Reader>>, hyper::Response) {
trace!(target: "dapps", "Setting URI: {:?}", uri); let file = match self.file {
self.file = match *uri { None => return (None, ContentHandler::error(
RequestUri::AbsolutePath { ref path, .. } => { StatusCode::NotFound,
self.app.file(&self.extract_path(path)) "File not found",
}, "Requested file has not been found.",
RequestUri::AbsoluteUri(ref url) => { None,
self.app.file(&self.extract_path(url.path())) self.safe_to_embed_on,
}, ).into()),
_ => None, Some(file) => file,
}.map_or_else(|| ServedFile::new(self.safe_to_embed_on.clone()), |f| ServedFile::File(f)); };
}
}
impl<T: Dapp> PageHandler<T> { let mut res = hyper::Response::new()
fn extract_path(&self, path: &str) -> String { .with_status(StatusCode::Ok);
let app_id = &self.path.app_id;
let prefix = "/".to_owned() + self.prefix.as_ref().unwrap_or(app_id);
let prefix_with_slash = prefix.clone() + "/";
let query_pos = path.find('?').unwrap_or_else(|| path.len());
// Index file support // headers
match path == "/" || path == &prefix || path == &prefix_with_slash { {
true => "index.html".to_owned(), let mut headers = res.headers_mut();
false => if path.starts_with(&prefix_with_slash) {
path[prefix_with_slash.len()..query_pos].to_owned() if let PageCache::Enabled = self.cache {
} else if path.starts_with("/") { let validity_secs = 365u32 * 24 * 3600;
path[1..query_pos].to_owned() let validity = Duration::from_secs(validity_secs as u64);
} else { headers.set(header::CacheControl(vec![
path[0..query_pos].to_owned() header::CacheDirective::Public,
header::CacheDirective::MaxAge(validity_secs),
]));
headers.set(header::Expires(header::HttpDate::from(SystemTime::now() + validity)));
} }
headers.set(header::ContentType(file.content_type().to_owned()));
add_security_headers(&mut headers, self.safe_to_embed_on);
} }
let (reader, body) = Reader::pair(file.into_reader(), Vec::new());
res.set_body(body);
(Some(reader), res)
} }
} }
impl<T: Dapp> server::Handler<HttpStream> for PageHandler<T> {
fn on_request(&mut self, req: server::Request<HttpStream>) -> Next {
self.set_uri(req.uri());
Next::write()
}
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
Next::write()
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
match self.file {
ServedFile::File(ref f) => {
res.set_status(StatusCode::Ok);
if let PageCache::Enabled = self.cache {
let mut headers = res.headers_mut();
let validity = Duration::days(365);
headers.set(header::CacheControl(vec![
header::CacheDirective::Public,
header::CacheDirective::MaxAge(validity.num_seconds() as u32),
]));
headers.set(header::Expires(header::HttpDate(time::now() + validity)));
}
match f.content_type().parse() {
Ok(mime) => res.headers_mut().set(header::ContentType(mime)),
Err(()) => debug!(target: "dapps", "invalid MIME type: {}", f.content_type()),
}
// Security headers:
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.take());
Next::write()
},
ServedFile::Error(ref mut handler) => {
handler.on_response(res)
}
}
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
match self.file {
ServedFile::Error(ref mut handler) => handler.on_response_writable(encoder),
ServedFile::File(ref f) if f.is_drained() => Next::end(),
ServedFile::File(ref mut f) => match encoder.write(f.next_chunk()) {
Ok(bytes) => {
f.bytes_written(bytes);
Next::write()
},
Err(e) => match e.kind() {
::std::io::ErrorKind::WouldBlock => Next::write(),
_ => Next::end(),
},
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
pub struct TestWebAppFile;
impl DappFile for TestWebAppFile {
fn content_type(&self) -> &str {
unimplemented!()
}
fn is_drained(&self) -> bool {
unimplemented!()
}
fn next_chunk(&mut self) -> &[u8] {
unimplemented!()
}
fn bytes_written(&mut self, _bytes: usize) {
unimplemented!()
}
}
#[derive(Default)]
pub struct TestWebapp;
impl Dapp for TestWebapp {
type DappFile = TestWebAppFile;
fn file(&self, _path: &str) -> Option<Self::DappFile> {
None
}
}
}
#[test]
fn should_extract_path_with_appid() {
// given
let path1 = "/";
let path2= "/test.css";
let path3 = "/app/myfile.txt";
let path4 = "/app/myfile.txt?query=123";
let page_handler = PageHandler {
app: test::TestWebapp,
prefix: None,
path: EndpointPath {
app_id: "app".to_owned(),
app_params: vec![],
host: "".to_owned(),
port: 8080,
using_dapps_domains: true,
},
file: ServedFile::new(None),
cache: Default::default(),
safe_to_embed_on: None,
};
// when
let res1 = page_handler.extract_path(path1);
let res2 = page_handler.extract_path(path2);
let res3 = page_handler.extract_path(path3);
let res4 = page_handler.extract_path(path4);
// then
assert_eq!(&res1, "index.html");
assert_eq!(&res2, "test.css");
assert_eq!(&res3, "myfile.txt");
assert_eq!(&res4, "myfile.txt");
}

View File

@ -15,16 +15,18 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use mime_guess; use mime_guess;
use std::io::{Seek, Read, SeekFrom}; use std::{fs, fmt};
use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use page::handler::{self, PageCache, PageHandlerWaiting}; use futures::{future};
use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; use futures_cpupool::CpuPool;
use mime::Mime; use page::handler::{self, PageCache};
use endpoint::{Endpoint, EndpointInfo, EndpointPath, Request, Response};
use hyper::mime::Mime;
use Embeddable; use Embeddable;
#[derive(Debug, Clone)] #[derive(Clone)]
pub struct LocalPageEndpoint { pub struct Dapp {
pool: CpuPool,
path: PathBuf, path: PathBuf,
mime: Option<Mime>, mime: Option<Mime>,
info: Option<EndpointInfo>, info: Option<EndpointInfo>,
@ -32,23 +34,37 @@ pub struct LocalPageEndpoint {
embeddable_on: Embeddable, embeddable_on: Embeddable,
} }
impl LocalPageEndpoint { impl fmt::Debug for Dapp {
pub fn new(path: PathBuf, info: EndpointInfo, cache: PageCache, embeddable_on: Embeddable) -> Self { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
LocalPageEndpoint { fmt.debug_struct("Dapp")
path: path, .field("path", &self.path)
.field("mime", &self.mime)
.field("info", &self.info)
.field("cache", &self.cache)
.field("embeddable_on", &self.embeddable_on)
.finish()
}
}
impl Dapp {
pub fn new(pool: CpuPool, path: PathBuf, info: EndpointInfo, cache: PageCache, embeddable_on: Embeddable) -> Self {
Dapp {
pool,
path,
mime: None, mime: None,
info: Some(info), info: Some(info),
cache: cache, cache,
embeddable_on: embeddable_on, embeddable_on,
} }
} }
pub fn single_file(path: PathBuf, mime: Mime, cache: PageCache) -> Self { pub fn single_file(pool: CpuPool, path: PathBuf, mime: Mime, cache: PageCache) -> Self {
LocalPageEndpoint { Dapp {
path: path, pool,
path,
mime: Some(mime), mime: Some(mime),
info: None, info: None,
cache: cache, cache,
embeddable_on: None, embeddable_on: None,
} }
} }
@ -57,125 +73,75 @@ impl LocalPageEndpoint {
self.path.clone() self.path.clone()
} }
fn page_handler_with_mime(&self, path: EndpointPath, mime: &Mime) -> handler::PageHandler<LocalSingleFile> { fn get_file(&self, path: &EndpointPath) -> Option<LocalFile> {
handler::PageHandler {
app: LocalSingleFile { path: self.path.clone(), mime: format!("{}", mime) },
prefix: None,
path: path,
file: handler::ServedFile::new(None),
safe_to_embed_on: self.embeddable_on.clone(),
cache: self.cache,
}
}
fn page_handler(&self, path: EndpointPath) -> handler::PageHandler<LocalDapp> {
handler::PageHandler {
app: LocalDapp { path: self.path.clone() },
prefix: None,
path: path,
file: handler::ServedFile::new(None),
safe_to_embed_on: self.embeddable_on.clone(),
cache: self.cache,
}
}
pub fn to_page_handler(&self, path: EndpointPath) -> Box<PageHandlerWaiting> {
if let Some(ref mime) = self.mime { if let Some(ref mime) = self.mime {
Box::new(self.page_handler_with_mime(path, mime)) return LocalFile::from_path(&self.path, mime.to_owned());
} else {
Box::new(self.page_handler(path))
} }
let mut file_path = self.path.to_owned();
if path.has_no_params() {
file_path.push("index.html");
} else {
for part in &path.app_params {
file_path.push(part);
}
}
let mime = mime_guess::guess_mime_type(&file_path);
LocalFile::from_path(&file_path, mime)
}
pub fn to_response(&self, path: &EndpointPath) -> Response {
let (reader, response) = handler::PageHandler {
file: self.get_file(path),
cache: self.cache,
safe_to_embed_on: self.embeddable_on.clone(),
}.into_response();
self.pool.spawn(reader).forget();
Box::new(future::ok(response))
} }
} }
impl Endpoint for LocalPageEndpoint { impl Endpoint for Dapp {
fn info(&self) -> Option<&EndpointInfo> { fn info(&self) -> Option<&EndpointInfo> {
self.info.as_ref() self.info.as_ref()
} }
fn to_handler(&self, path: EndpointPath) -> Box<Handler> { fn respond(&self, path: EndpointPath, _req: Request) -> Response {
if let Some(ref mime) = self.mime { self.to_response(&path)
Box::new(self.page_handler_with_mime(path, mime))
} else {
Box::new(self.page_handler(path))
}
}
}
struct LocalSingleFile {
path: PathBuf,
mime: String,
}
impl handler::Dapp for LocalSingleFile {
type DappFile = LocalFile;
fn file(&self, _path: &str) -> Option<Self::DappFile> {
LocalFile::from_path(&self.path, Some(&self.mime))
}
}
struct LocalDapp {
path: PathBuf,
}
impl handler::Dapp for LocalDapp {
type DappFile = LocalFile;
fn file(&self, file_path: &str) -> Option<Self::DappFile> {
let mut path = self.path.clone();
for part in file_path.split('/') {
path.push(part);
}
LocalFile::from_path(&path, None)
} }
} }
struct LocalFile { struct LocalFile {
content_type: String, content_type: Mime,
buffer: [u8; 4096],
file: fs::File, file: fs::File,
len: u64,
pos: u64,
} }
impl LocalFile { impl LocalFile {
fn from_path<P: AsRef<Path>>(path: P, mime: Option<&str>) -> Option<Self> { fn from_path<P: AsRef<Path>>(path: P, content_type: Mime) -> Option<Self> {
trace!(target: "dapps", "Local file: {:?}", path.as_ref());
// Check if file exists // Check if file exists
fs::File::open(&path).ok().map(|file| { fs::File::open(&path).ok().map(|file| {
let content_type = mime.map(|mime| mime.to_owned())
.unwrap_or_else(|| mime_guess::guess_mime_type(path).to_string());
let len = file.metadata().ok().map_or(0, |meta| meta.len());
LocalFile { LocalFile {
content_type: content_type, content_type,
buffer: [0; 4096], file,
file: file,
pos: 0,
len: len,
} }
}) })
} }
} }
impl handler::DappFile for LocalFile { impl handler::DappFile for LocalFile {
fn content_type(&self) -> &str { type Reader = fs::File;
fn content_type(&self) -> &Mime {
&self.content_type &self.content_type
} }
fn is_drained(&self) -> bool { fn into_reader(self) -> Self::Reader {
self.pos == self.len self.file
}
fn next_chunk(&mut self) -> &[u8] {
let _ = self.file.seek(SeekFrom::Start(self.pos));
if let Ok(n) = self.file.read(&mut self.buffer) {
&self.buffer[0..n]
} else {
&self.buffer[0..0]
}
}
fn bytes_written(&mut self, bytes: usize) {
self.pos += bytes as u64;
} }
} }

View File

@ -15,11 +15,9 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
mod builtin; pub mod builtin;
mod local; pub mod local;
mod handler; mod handler;
pub use self::local::LocalPageEndpoint; pub use self::handler::PageCache;
pub use self::builtin::PageEndpoint;
pub use self::handler::{PageCache, PageHandlerWaiting};

View File

@ -16,9 +16,11 @@
//! Serving ProxyPac file //! Serving ProxyPac file
use endpoint::{Endpoint, Handler, EndpointPath};
use handlers::ContentHandler;
use apps::HOME_PAGE; use apps::HOME_PAGE;
use endpoint::{Endpoint, Request, Response, EndpointPath};
use futures::future;
use handlers::ContentHandler;
use hyper::mime;
use {address, Embeddable}; use {address, Embeddable};
pub struct ProxyPac { pub struct ProxyPac {
@ -33,7 +35,7 @@ impl ProxyPac {
} }
impl Endpoint for ProxyPac { impl Endpoint for ProxyPac {
fn to_handler(&self, path: EndpointPath) -> Box<Handler> { fn respond(&self, path: EndpointPath, _req: Request) -> Response {
let ui = self.embeddable let ui = self.embeddable
.as_ref() .as_ref()
.map(|ref parent| address(&parent.host, parent.port)) .map(|ref parent| address(&parent.host, parent.port))
@ -57,7 +59,9 @@ function FindProxyForURL(url, host) {{
"#, "#,
HOME_PAGE, self.dapps_domain, path.host, path.port, ui); HOME_PAGE, self.dapps_domain, path.host, path.port, ui);
Box::new(ContentHandler::ok(content, mime!(Application/Javascript))) Box::new(future::ok(
ContentHandler::ok(content, mime::TEXT_JAVASCRIPT).into()
))
} }
} }

View File

@ -17,18 +17,16 @@
//! Router implementation //! Router implementation
//! Dispatch requests to proper application. //! Dispatch requests to proper application.
use std::cmp;
use std::sync::Arc; use std::sync::Arc;
use std::collections::HashMap; use std::collections::HashMap;
use url::{Url, Host}; use futures::future;
use hyper::{self, server, header, Control}; use hyper::{self, header, Uri};
use hyper::net::HttpStream;
use jsonrpc_http_server as http; use jsonrpc_http_server as http;
use apps; use apps;
use apps::fetcher::Fetcher; use apps::fetcher::Fetcher;
use endpoint::{Endpoint, EndpointPath, Handler}; use endpoint::{self, Endpoint, EndpointPath};
use Endpoints; use Endpoints;
use handlers; use handlers;
use Embeddable; use Embeddable;
@ -43,6 +41,13 @@ pub enum SpecialEndpoint {
None, None,
} }
enum Response {
Some(endpoint::Response),
None(hyper::Request),
}
/// An endpoint router.
/// Dispatches the request to particular Endpoint by requested uri/path.
pub struct Router { pub struct Router {
endpoints: Option<Endpoints>, endpoints: Option<Endpoints>,
fetch: Arc<Fetcher>, fetch: Arc<Fetcher>,
@ -52,11 +57,10 @@ pub struct Router {
} }
impl Router { impl Router {
fn resolve_request(&self, req: &server::Request<HttpStream>, control: Control, refresh_dapps: bool) -> (bool, Option<Box<Handler>>) { fn resolve_request(&self, req: hyper::Request, refresh_dapps: bool) -> (bool, Response) {
// Choose proper handler depending on path / domain // Choose proper handler depending on path / domain
let url = handlers::extract_url(req); let endpoint = extract_endpoint(req.uri(), req.headers().get(), &self.dapps_domain);
let endpoint = extract_endpoint(&url, &self.dapps_domain); let referer = extract_referer_endpoint(&req, &self.dapps_domain);
let referer = extract_referer_endpoint(req, &self.dapps_domain);
let is_utils = endpoint.1 == SpecialEndpoint::Utils; let is_utils = endpoint.1 == SpecialEndpoint::Utils;
let is_get_request = *req.method() == hyper::Method::Get; let is_get_request = *req.method() == hyper::Method::Get;
let is_head_request = *req.method() == hyper::Method::Head; let is_head_request = *req.method() == hyper::Method::Head;
@ -64,47 +68,51 @@ impl Router {
.as_ref() .as_ref()
.map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp)); .map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp));
trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", req.uri(), req);
debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); debug!(target: "dapps", "Handling endpoint request: {:?}, referer: {:?}", endpoint, referer);
(is_utils, match (endpoint.0, endpoint.1, referer) { (is_utils, match (endpoint.0, endpoint.1, referer) {
// Handle invalid web requests that we can recover from // Handle invalid web requests that we can recover from
(ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) (ref path, SpecialEndpoint::None, Some(ref referer))
if referer.app_id == apps::WEB_PATH if referer.app_id == apps::WEB_PATH
&& has_dapp(apps::WEB_PATH) && has_dapp(apps::WEB_PATH)
&& !is_web_endpoint(path) && !is_web_endpoint(path)
=> =>
{ {
trace!(target: "dapps", "Redirecting to correct web request: {:?}", referer_url); let token = referer.app_params.get(0).map(String::as_str).unwrap_or("");
let len = cmp::min(referer_url.path.len(), 2); // /web/<encoded>/ let requested = req.uri().path();
let base = referer_url.path[..len].join("/"); let query = req.uri().query().map_or_else(String::new, |query| format!("?{}", query));
let requested = url.map(|u| u.path.join("/")).unwrap_or_default(); let redirect_url = format!("/{}/{}{}{}", apps::WEB_PATH, token, requested, query);
Some(handlers::Redirection::boxed(&format!("/{}/{}", base, requested))) trace!(target: "dapps", "Redirecting to correct web request: {:?}", redirect_url);
Response::Some(Box::new(future::ok(
handlers::Redirection::new(redirect_url).into()
)))
}, },
// First check special endpoints // First check special endpoints
(ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => {
trace!(target: "dapps", "Resolving to special endpoint."); trace!(target: "dapps", "Resolving to special endpoint.");
self.special.get(endpoint) let special = self.special.get(endpoint).expect("special known to contain key; qed");
.expect("special known to contain key; qed") match *special {
.as_ref() Some(ref special) => Response::Some(special.respond(path.clone().unwrap_or_default(), req)),
.map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) None => Response::None(req),
}
}, },
// Then delegate to dapp // Then delegate to dapp
(Some(ref path), _, _) if has_dapp(&path.app_id) => { (Some(ref path), _, _) if has_dapp(&path.app_id) => {
trace!(target: "dapps", "Resolving to local/builtin dapp."); trace!(target: "dapps", "Resolving to local/builtin dapp.");
Some(self.endpoints Response::Some(self.endpoints
.as_ref() .as_ref()
.expect("endpoints known to be set; qed") .expect("endpoints known to be set; qed")
.endpoints .endpoints
.read() .read()
.get(&path.app_id) .get(&path.app_id)
.expect("endpoints known to contain key; qed") .expect("endpoints known to contain key; qed")
.to_async_handler(path.clone(), control)) .respond(path.clone(), req))
}, },
// Try to resolve and fetch the dapp // Try to resolve and fetch the dapp
(Some(ref path), _, _) if self.fetch.contains(&path.app_id) => { (Some(ref path), _, _) if self.fetch.contains(&path.app_id) => {
trace!(target: "dapps", "Resolving to fetchable content."); trace!(target: "dapps", "Resolving to fetchable content.");
Some(self.fetch.to_async_handler(path.clone(), control)) Response::Some(self.fetch.respond(path.clone(), req))
}, },
// 404 for non-existent content (only if serving endpoints and not homepage) // 404 for non-existent content (only if serving endpoints and not homepage)
(Some(ref path), _, _) (Some(ref path), _, _)
@ -117,45 +125,50 @@ impl Router {
if refresh_dapps { if refresh_dapps {
debug!(target: "dapps", "Refreshing dapps and re-trying."); debug!(target: "dapps", "Refreshing dapps and re-trying.");
self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps()); self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps());
return self.resolve_request(req, control, false) return self.resolve_request(req, false);
} else { } else {
Some(Box::new(handlers::ContentHandler::error( Response::Some(Box::new(future::ok(handlers::ContentHandler::error(
hyper::StatusCode::NotFound, hyper::StatusCode::NotFound,
"404 Not Found", "404 Not Found",
"Requested content was not found.", "Requested content was not found.",
None, None,
self.embeddable_on.clone(), self.embeddable_on.clone(),
))) ).into())))
} }
}, },
// Any other GET|HEAD requests to home page. // Any other GET|HEAD requests to home page.
_ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => { _ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => {
self.special.get(&SpecialEndpoint::Home) let special = self.special.get(&SpecialEndpoint::Home).expect("special known to contain key; qed");
.expect("special known to contain key; qed") match *special {
.as_ref() Some(ref special) => {
.map(|special| special.to_async_handler(Default::default(), control)) let mut endpoint = EndpointPath::default();
endpoint.app_params = req.uri().path().split('/').map(str::to_owned).collect();
Response::Some(special.respond(endpoint, req))
},
None => Response::None(req),
}
}, },
// RPC by default // RPC by default
_ => { _ => {
trace!(target: "dapps", "Resolving to RPC call."); trace!(target: "dapps", "Resolving to RPC call.");
None Response::None(req)
} }
}) })
} }
} }
impl http::RequestMiddleware for Router { impl http::RequestMiddleware for Router {
fn on_request(&self, req: &server::Request<HttpStream>, control: &Control) -> http::RequestMiddlewareAction { fn on_request(&self, req: hyper::Request) -> http::RequestMiddlewareAction {
let control = control.clone();
let is_origin_set = req.headers().get::<header::Origin>().is_some(); let is_origin_set = req.headers().get::<header::Origin>().is_some();
let (is_utils, handler) = self.resolve_request(req, control, self.endpoints.is_some()); let (is_utils, response) = self.resolve_request(req, self.endpoints.is_some());
match handler { match response {
Some(handler) => http::RequestMiddlewareAction::Respond { Response::Some(response) => http::RequestMiddlewareAction::Respond {
should_validate_hosts: !is_utils, should_validate_hosts: !is_utils,
handler: handler, response,
}, },
None => http::RequestMiddlewareAction::Proceed { Response::None(request) => http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: !is_origin_set, should_continue_on_invalid_cors: !is_origin_set,
request,
}, },
} }
} }
@ -186,41 +199,44 @@ fn is_web_endpoint(path: &Option<EndpointPath>) -> bool {
} }
} }
fn extract_referer_endpoint(req: &server::Request<HttpStream>, dapps_domain: &str) -> Option<(EndpointPath, Url)> { fn extract_referer_endpoint(req: &hyper::Request, dapps_domain: &str) -> Option<EndpointPath> {
let referer = req.headers().get::<header::Referer>(); let referer = req.headers().get::<header::Referer>();
let url = referer.and_then(|referer| Url::parse(&referer.0).ok()); let url = referer.and_then(|referer| referer.parse().ok());
url.and_then(|url| { url.and_then(|url| {
let option = Some(url); extract_url_referer_endpoint(&url, dapps_domain).or_else(|| {
extract_url_referer_endpoint(&option, dapps_domain).or_else(|| { extract_endpoint(&url, None, dapps_domain).0
extract_endpoint(&option, dapps_domain).0.map(|endpoint| (endpoint, option.expect("Just wrapped; qed")))
}) })
}) })
} }
fn extract_url_referer_endpoint(url: &Option<Url>, dapps_domain: &str) -> Option<(EndpointPath, Url)> { fn extract_url_referer_endpoint(url: &Uri, dapps_domain: &str) -> Option<EndpointPath> {
let query = url.as_ref().and_then(|url| url.query.as_ref()); let query = url.query();
match (url, query) { match query {
(&Some(ref url), Some(ref query)) if query.starts_with(apps::URL_REFERER) => { Some(query) if query.starts_with(apps::URL_REFERER) => {
let referer_url = format!("http://{}:{}/{}", url.host, url.port, &query[apps::URL_REFERER.len()..]); let scheme = url.scheme().unwrap_or("http");
let host = url.host().unwrap_or("unknown");
let port = default_port(url, None);
let referer_url = format!("{}://{}:{}/{}", scheme, host, port, &query[apps::URL_REFERER.len()..]);
debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url); debug!(target: "dapps", "Recovering referer from query parameter: {}", referer_url);
let referer_url = Url::parse(&referer_url).ok(); if let Some(referer_url) = referer_url.parse().ok() {
extract_endpoint(&referer_url, dapps_domain).0.map(|endpoint| { extract_endpoint(&referer_url, None, dapps_domain).0
(endpoint, referer_url.expect("Endpoint returned only when url `is_some`").clone()) } else {
}) None
}
}, },
_ => None, _ => None,
} }
} }
fn extract_endpoint(url: &Option<Url>, dapps_domain: &str) -> (Option<EndpointPath>, SpecialEndpoint) { fn extract_endpoint(url: &Uri, extra_host: Option<&header::Host>, dapps_domain: &str) -> (Option<EndpointPath>, SpecialEndpoint) {
fn special_endpoint(url: &Url) -> SpecialEndpoint { fn special_endpoint(path: &[&str]) -> SpecialEndpoint {
if url.path.len() <= 1 { if path.len() <= 1 {
return SpecialEndpoint::None; return SpecialEndpoint::None;
} }
match url.path[0].as_ref() { match path[0].as_ref() {
apps::RPC_PATH => SpecialEndpoint::Rpc, apps::RPC_PATH => SpecialEndpoint::Rpc,
apps::API_PATH => SpecialEndpoint::Api, apps::API_PATH => SpecialEndpoint::Api,
apps::UTILS_PATH => SpecialEndpoint::Utils, apps::UTILS_PATH => SpecialEndpoint::Utils,
@ -229,114 +245,162 @@ fn extract_endpoint(url: &Option<Url>, dapps_domain: &str) -> (Option<EndpointPa
} }
} }
match *url { let port = default_port(url, extra_host.as_ref().and_then(|h| h.port()));
Some(ref url) => match url.host { let host = url.host().or_else(|| extra_host.as_ref().map(|h| h.hostname()));
Host::Domain(ref domain) if domain.ends_with(dapps_domain) => { let query = url.query().map(str::to_owned);
let id = &domain[0..(domain.len() - dapps_domain.len())]; let mut path_segments = url.path().split('/').skip(1).collect::<Vec<_>>();
let (id, params) = if let Some(split) = id.rfind('.') { trace!(
let (params, id) = id.split_at(split); target: "dapps",
(id[1..].to_owned(), [params.to_owned()].into_iter().chain(&url.path).cloned().collect()) "Extracting endpoint from: {:?} (dapps: {}). Got host {:?}:{} with path {:?}",
} else { url, dapps_domain, host, port, path_segments
(id.to_owned(), url.path.clone()) );
}; match host {
Some(host) if host.ends_with(dapps_domain) => {
let id = &host[0..(host.len() - dapps_domain.len())];
let special = special_endpoint(&path_segments);
(Some(EndpointPath { // remove special endpoint id from params
app_id: id, if special != SpecialEndpoint::None {
app_params: params, path_segments.remove(0);
host: domain.clone(), }
port: url.port,
using_dapps_domains: true, let (app_id, app_params) = if let Some(split) = id.rfind('.') {
}), special_endpoint(url)) let (params, id) = id.split_at(split);
}, path_segments.insert(0, params);
_ if url.path.len() > 1 => { (id[1..].to_owned(), path_segments)
let id = url.path[0].to_owned(); } else {
(Some(EndpointPath { (id.to_owned(), path_segments)
app_id: id, };
app_params: url.path[1..].to_vec(),
host: format!("{}", url.host), (Some(EndpointPath {
port: url.port, app_id,
using_dapps_domains: false, app_params: app_params.into_iter().map(Into::into).collect(),
}), special_endpoint(url)) query,
}, host: host.to_owned(),
_ => (None, special_endpoint(url)), port,
using_dapps_domains: true,
}), special)
}, },
_ => (None, SpecialEndpoint::None) Some(host) if path_segments.len() > 1 => {
let special = special_endpoint(&path_segments);
let id = path_segments.remove(0);
(Some(EndpointPath {
app_id: id.to_owned(),
app_params: path_segments.into_iter().map(Into::into).collect(),
query,
host: host.to_owned(),
port,
using_dapps_domains: false,
}), special)
},
_ => (None, special_endpoint(&path_segments)),
} }
} }
#[test] fn default_port(url: &Uri, extra_port: Option<u16>) -> u16 {
fn should_extract_endpoint() { let scheme = url.scheme().unwrap_or("http");
let dapps_domain = ".web3.site"; url.port().or(extra_port).unwrap_or_else(|| match scheme {
assert_eq!(extract_endpoint(&None, dapps_domain), (None, SpecialEndpoint::None)); "http" => 80,
"https" => 443,
// With path prefix _ => 80,
assert_eq!( })
extract_endpoint(&Url::parse("http://localhost:8080/status/index.html").ok(), dapps_domain), }
(Some(EndpointPath {
app_id: "status".to_owned(), #[cfg(test)]
app_params: vec!["index.html".to_owned()], mod tests {
host: "localhost".to_owned(), use super::{SpecialEndpoint, EndpointPath, extract_endpoint};
port: 8080,
using_dapps_domains: false, #[test]
}), SpecialEndpoint::None) fn should_extract_endpoint() {
); let dapps_domain = ".web3.site";
// With path prefix // With path prefix
assert_eq!( assert_eq!(
extract_endpoint(&Url::parse("http://localhost:8080/rpc/").ok(), dapps_domain), extract_endpoint(&"http://localhost:8080/status/index.html?q=1".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath { (Some(EndpointPath {
app_id: "rpc".to_owned(), app_id: "status".to_owned(),
app_params: vec!["".to_owned()], app_params: vec!["index.html".to_owned()],
host: "localhost".to_owned(), query: Some("q=1".into()),
port: 8080, host: "localhost".to_owned(),
using_dapps_domains: false, port: 8080,
}), SpecialEndpoint::Rpc) using_dapps_domains: false,
); }), SpecialEndpoint::None)
);
assert_eq!(
extract_endpoint(&Url::parse("http://my.status.web3.site/parity-utils/inject.js").ok(), dapps_domain), // With path prefix
(Some(EndpointPath { assert_eq!(
app_id: "status".to_owned(), extract_endpoint(&"http://localhost:8080/rpc/".parse().unwrap(), None, dapps_domain),
app_params: vec!["my".to_owned(), "parity-utils".into(), "inject.js".into()], (Some(EndpointPath {
host: "my.status.web3.site".to_owned(), app_id: "rpc".to_owned(),
port: 80, app_params: vec!["".to_owned()],
using_dapps_domains: true, query: None,
}), SpecialEndpoint::Utils) host: "localhost".to_owned(),
); port: 8080,
using_dapps_domains: false,
// By Subdomain }), SpecialEndpoint::Rpc)
assert_eq!( );
extract_endpoint(&Url::parse("http://status.web3.site/test.html").ok(), dapps_domain),
(Some(EndpointPath { assert_eq!(
app_id: "status".to_owned(), extract_endpoint(&"http://my.status.web3.site/parity-utils/inject.js".parse().unwrap(), None, dapps_domain),
app_params: vec!["test.html".to_owned()], (Some(EndpointPath {
host: "status.web3.site".to_owned(), app_id: "status".to_owned(),
port: 80, app_params: vec!["my".into(), "inject.js".into()],
using_dapps_domains: true, query: None,
}), SpecialEndpoint::None) host: "my.status.web3.site".to_owned(),
); port: 80,
using_dapps_domains: true,
// RPC by subdomain }), SpecialEndpoint::Utils)
assert_eq!( );
extract_endpoint(&Url::parse("http://my.status.web3.site/rpc/").ok(), dapps_domain),
(Some(EndpointPath { assert_eq!(
app_id: "status".to_owned(), extract_endpoint(&"http://my.status.web3.site/inject.js".parse().unwrap(), None, dapps_domain),
app_params: vec!["my".to_owned(), "rpc".into(), "".into()], (Some(EndpointPath {
host: "my.status.web3.site".to_owned(), app_id: "status".to_owned(),
port: 80, app_params: vec!["my".into(), "inject.js".into()],
using_dapps_domains: true, query: None,
}), SpecialEndpoint::Rpc) host: "my.status.web3.site".to_owned(),
); port: 80,
using_dapps_domains: true,
// API by subdomain }), SpecialEndpoint::None)
assert_eq!( );
extract_endpoint(&Url::parse("http://my.status.web3.site/api/").ok(), dapps_domain),
(Some(EndpointPath { // By Subdomain
app_id: "status".to_owned(), assert_eq!(
app_params: vec!["my".to_owned(), "api".into(), "".into()], extract_endpoint(&"http://status.web3.site/test.html".parse().unwrap(), None, dapps_domain),
host: "my.status.web3.site".to_owned(), (Some(EndpointPath {
port: 80, app_id: "status".to_owned(),
using_dapps_domains: true, app_params: vec!["test.html".to_owned()],
}), SpecialEndpoint::Api) query: None,
); host: "status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::None)
);
// RPC by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/rpc/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Rpc)
);
// API by subdomain
assert_eq!(
extract_endpoint(&"http://my.status.web3.site/api/".parse().unwrap(), None, dapps_domain),
(Some(EndpointPath {
app_id: "status".to_owned(),
app_params: vec!["my".into(), "".into()],
query: None,
host: "my.status.web3.site".to_owned(),
port: 80,
using_dapps_domains: true,
}), SpecialEndpoint::Api)
);
}
} }

View File

@ -49,6 +49,7 @@ fn should_handle_ping() {
"\ "\
POST /api/ping HTTP/1.1\r\n\ POST /api/ping HTTP/1.1\r\n\
Host: home.parity\r\n\ Host: home.parity\r\n\
Content-Type: application/json\r\n\
Connection: close\r\n\ Connection: close\r\n\
\r\n\ \r\n\
{} {}

View File

@ -18,7 +18,7 @@ use devtools::http_client;
use rustc_hex::FromHex; use rustc_hex::FromHex;
use tests::helpers::{ use tests::helpers::{
serve_with_registrar, serve_with_registrar_and_sync, serve_with_fetch, serve_with_registrar, serve_with_registrar_and_sync, serve_with_fetch,
serve_with_registrar_and_fetch, serve_with_registrar_and_fetch_and_threads, serve_with_registrar_and_fetch,
request, assert_security_headers_for_embed, request, assert_security_headers_for_embed,
}; };
@ -171,6 +171,8 @@ fn should_return_fetched_dapp_content() {
r#"18 r#"18
<h1>Hello Gavcoin!</h1> <h1>Hello Gavcoin!</h1>
0
"# "#
); );
@ -257,7 +259,7 @@ fn should_not_request_content_twice() {
use std::thread; use std::thread;
// given // given
let (server, fetch, registrar) = serve_with_registrar_and_fetch_and_threads(true); let (server, fetch, registrar) = serve_with_registrar_and_fetch();
let gavcoin = GAVCOIN_ICON.from_hex().unwrap(); let gavcoin = GAVCOIN_ICON.from_hex().unwrap();
registrar.set_result( registrar.set_result(
"2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".parse().unwrap(), "2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e".parse().unwrap(),

View File

@ -94,7 +94,7 @@ impl FakeFetch {
} }
impl Fetch for FakeFetch { impl Fetch for FakeFetch {
type Result = futures::BoxFuture<fetch::Response, fetch::Error>; type Result = Box<Future<Item = fetch::Response, Error = fetch::Error> + Send>;
fn new() -> Result<Self, fetch::Error> where Self: Sized { fn new() -> Result<Self, fetch::Error> where Self: Sized {
Ok(FakeFetch::default()) Ok(FakeFetch::default())
@ -117,6 +117,17 @@ impl Fetch for FakeFetch {
tx.send(fetch::Response::from_reader(cursor)).unwrap(); tx.send(fetch::Response::from_reader(cursor)).unwrap();
}); });
rx.map_err(|_| fetch::Error::Aborted).boxed() Box::new(rx.map_err(|_| fetch::Error::Aborted))
}
fn process_and_forget<F, I, E>(&self, f: F) where
F: Future<Item=I, Error=E> + Send + 'static,
I: Send + 'static,
E: Send + 'static,
{
// Spawn the task in a separate thread.
thread::spawn(|| {
let _ = f.wait();
});
} }
} }

View File

@ -22,12 +22,12 @@ use std::sync::Arc;
use env_logger::LogBuilder; use env_logger::LogBuilder;
use jsonrpc_core::IoHandler; use jsonrpc_core::IoHandler;
use jsonrpc_http_server::{self as http, Host, DomainsValidation}; use jsonrpc_http_server::{self as http, Host, DomainsValidation};
use parity_reactor::Remote;
use devtools::http_client; use devtools::http_client;
use hash_fetch::urlhint::ContractClient; use hash_fetch::urlhint::ContractClient;
use fetch::{Fetch, Client as FetchClient}; use fetch::{Fetch, Client as FetchClient};
use node_health::{NodeHealth, TimeChecker, CpuPool}; use node_health::{NodeHealth, TimeChecker, CpuPool};
use parity_reactor::Remote;
use {Middleware, SyncStatus, WebProxyTokens}; use {Middleware, SyncStatus, WebProxyTokens};
@ -55,7 +55,7 @@ fn init_logger() {
} }
} }
pub fn init_server<F, B>(process: F, io: IoHandler, remote: Remote) -> (Server, Arc<FakeRegistrar>) where pub fn init_server<F, B>(process: F, io: IoHandler) -> (Server, Arc<FakeRegistrar>) where
F: FnOnce(ServerBuilder) -> ServerBuilder<B>, F: FnOnce(ServerBuilder) -> ServerBuilder<B>,
B: Fetch, B: Fetch,
{ {
@ -64,11 +64,9 @@ pub fn init_server<F, B>(process: F, io: IoHandler, remote: Remote) -> (Server,
let mut dapps_path = env::temp_dir(); let mut dapps_path = env::temp_dir();
dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");
let server = process(ServerBuilder::new( let mut builder = ServerBuilder::new(&dapps_path, registrar.clone());
&dapps_path, registrar.clone(), remote, builder.signer_address = Some(("127.0.0.1".into(), SIGNER_PORT));
)) let server = process(builder).start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap();
.signer_address(Some(("127.0.0.1".into(), SIGNER_PORT)))
.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap();
( (
server, server,
registrar, registrar,
@ -76,34 +74,34 @@ pub fn init_server<F, B>(process: F, io: IoHandler, remote: Remote) -> (Server,
} }
pub fn serve_with_rpc(io: IoHandler) -> Server { pub fn serve_with_rpc(io: IoHandler) -> Server {
init_server(|builder| builder, io, Remote::new_sync()).0 init_server(|builder| builder, io).0
} }
pub fn serve_hosts(hosts: Option<Vec<String>>) -> Server { pub fn serve_hosts(hosts: Option<Vec<String>>) -> Server {
let hosts = hosts.map(|hosts| hosts.into_iter().map(Into::into).collect()); let hosts = hosts.map(|hosts| hosts.into_iter().map(Into::into).collect());
init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 init_server(|mut builder| {
builder.allowed_hosts = hosts.into();
builder
}, Default::default()).0
} }
pub fn serve_with_registrar() -> (Server, Arc<FakeRegistrar>) { pub fn serve_with_registrar() -> (Server, Arc<FakeRegistrar>) {
init_server(|builder| builder, Default::default(), Remote::new_sync()) init_server(|builder| builder, Default::default())
} }
pub fn serve_with_registrar_and_sync() -> (Server, Arc<FakeRegistrar>) { pub fn serve_with_registrar_and_sync() -> (Server, Arc<FakeRegistrar>) {
init_server(|builder| { init_server(|mut builder| {
builder.sync_status(Arc::new(FakeSync(true))) builder.sync_status = Arc::new(FakeSync(true));
}, Default::default(), Remote::new_sync()) builder
}, Default::default())
} }
pub fn serve_with_registrar_and_fetch() -> (Server, FakeFetch, Arc<FakeRegistrar>) { pub fn serve_with_registrar_and_fetch() -> (Server, FakeFetch, Arc<FakeRegistrar>) {
serve_with_registrar_and_fetch_and_threads(false)
}
pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Server, FakeFetch, Arc<FakeRegistrar>) {
let fetch = FakeFetch::default(); let fetch = FakeFetch::default();
let f = fetch.clone(); let f = fetch.clone();
let (server, reg) = init_server(move |builder| { let (server, reg) = init_server(move |builder| {
builder.fetch(f.clone()) builder.fetch(f.clone())
}, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); }, Default::default());
(server, fetch, reg) (server, fetch, reg)
} }
@ -111,19 +109,25 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv
pub fn serve_with_fetch(web_token: &'static str, domain: &'static str) -> (Server, FakeFetch) { pub fn serve_with_fetch(web_token: &'static str, domain: &'static str) -> (Server, FakeFetch) {
let fetch = FakeFetch::default(); let fetch = FakeFetch::default();
let f = fetch.clone(); let f = fetch.clone();
let (server, _) = init_server(move |builder| { let (server, _) = init_server(move |mut builder| {
builder builder.web_proxy_tokens = Arc::new(move |token| {
.fetch(f.clone()) if &token == web_token { Some(domain.into()) } else { None }
.web_proxy_tokens(Arc::new(move |token| { });
if &token == web_token { Some(domain.into()) } else { None } builder.fetch(f.clone())
})) }, Default::default());
}, Default::default(), Remote::new_sync());
(server, fetch) (server, fetch)
} }
pub fn serve() -> Server { pub fn serve() -> Server {
init_server(|builder| builder, Default::default(), Remote::new_sync()).0 init_server(|builder| builder, Default::default()).0
}
pub fn serve_ui() -> Server {
init_server(|mut builder| {
builder.serve_ui = true;
builder
}, Default::default()).0
} }
pub fn request(server: Server, request: &str) -> http_client::Response { pub fn request(server: Server, request: &str) -> http_client::Response {
@ -146,13 +150,13 @@ pub struct ServerBuilder<T: Fetch = FetchClient> {
web_proxy_tokens: Arc<WebProxyTokens>, web_proxy_tokens: Arc<WebProxyTokens>,
signer_address: Option<(String, u16)>, signer_address: Option<(String, u16)>,
allowed_hosts: DomainsValidation<Host>, allowed_hosts: DomainsValidation<Host>,
remote: Remote,
fetch: Option<T>, fetch: Option<T>,
serve_ui: bool,
} }
impl ServerBuilder { impl ServerBuilder {
/// Construct new dapps server /// Construct new dapps server
pub fn new<P: AsRef<Path>>(dapps_path: P, registrar: Arc<ContractClient>, remote: Remote) -> Self { pub fn new<P: AsRef<Path>>(dapps_path: P, registrar: Arc<ContractClient>) -> Self {
ServerBuilder { ServerBuilder {
dapps_path: dapps_path.as_ref().to_owned(), dapps_path: dapps_path.as_ref().to_owned(),
registrar: registrar, registrar: registrar,
@ -160,8 +164,8 @@ impl ServerBuilder {
web_proxy_tokens: Arc::new(|_| None), web_proxy_tokens: Arc::new(|_| None),
signer_address: None, signer_address: None,
allowed_hosts: DomainsValidation::Disabled, allowed_hosts: DomainsValidation::Disabled,
remote: remote,
fetch: None, fetch: None,
serve_ui: false,
} }
} }
} }
@ -176,37 +180,11 @@ impl<T: Fetch> ServerBuilder<T> {
web_proxy_tokens: self.web_proxy_tokens, web_proxy_tokens: self.web_proxy_tokens,
signer_address: self.signer_address, signer_address: self.signer_address,
allowed_hosts: self.allowed_hosts, allowed_hosts: self.allowed_hosts,
remote: self.remote,
fetch: Some(fetch), fetch: Some(fetch),
serve_ui: self.serve_ui,
} }
} }
/// Change default sync status.
pub fn sync_status(mut self, status: Arc<SyncStatus>) -> Self {
self.sync_status = status;
self
}
/// Change default web proxy tokens validator.
pub fn web_proxy_tokens(mut self, tokens: Arc<WebProxyTokens>) -> Self {
self.web_proxy_tokens = tokens;
self
}
/// Change default signer port.
pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self {
self.signer_address = signer_address;
self
}
/// Change allowed hosts.
/// `None` - All hosts are allowed
/// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address)
pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation<Host>) -> Self {
self.allowed_hosts = allowed_hosts;
self
}
/// Asynchronously start server with no authentication, /// Asynchronously start server with no authentication,
/// returns result with `Server` handle on success or an error. /// returns result with `Server` handle on success or an error.
pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result<Server, http::Error> { pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result<Server, http::Error> {
@ -221,8 +199,9 @@ impl<T: Fetch> ServerBuilder<T> {
self.registrar, self.registrar,
self.sync_status, self.sync_status,
self.web_proxy_tokens, self.web_proxy_tokens,
self.remote, Remote::new_sync(),
fetch, fetch,
self.serve_ui,
) )
} }
@ -254,26 +233,39 @@ impl Server {
web_proxy_tokens: Arc<WebProxyTokens>, web_proxy_tokens: Arc<WebProxyTokens>,
remote: Remote, remote: Remote,
fetch: F, fetch: F,
serve_ui: bool,
) -> Result<Server, http::Error> { ) -> Result<Server, http::Error> {
let health = NodeHealth::new( let health = NodeHealth::new(
sync_status.clone(), sync_status.clone(),
TimeChecker::new::<String>(&[], CpuPool::new(1)), TimeChecker::new::<String>(&[], CpuPool::new(1)),
remote.clone(), remote.clone(),
); );
let middleware = Middleware::dapps( let pool = ::futures_cpupool::CpuPool::new(1);
health, let middleware = if serve_ui {
remote, Middleware::ui(
signer_address, pool,
vec![], health,
vec![], DAPPS_DOMAIN.into(),
dapps_path, registrar,
extra_dapps, sync_status,
DAPPS_DOMAIN.into(), fetch,
registrar, )
sync_status, } else {
web_proxy_tokens, Middleware::dapps(
fetch, pool,
); health,
signer_address,
vec![],
vec![],
dapps_path,
extra_dapps,
DAPPS_DOMAIN.into(),
registrar,
sync_status,
web_proxy_tokens,
fetch,
)
};
let mut allowed_hosts: Option<Vec<Host>> = allowed_hosts.into(); let mut allowed_hosts: Option<Vec<Host>> = allowed_hosts.into();
allowed_hosts.as_mut().map(|mut hosts| { allowed_hosts.as_mut().map(|mut hosts| {
@ -295,9 +287,7 @@ impl Server {
pub fn addr(&self) -> &SocketAddr { pub fn addr(&self) -> &SocketAddr {
self.server.as_ref() self.server.as_ref()
.expect("server is always Some at the start; it's consumed only when object is dropped; qed") .expect("server is always Some at the start; it's consumed only when object is dropped; qed")
.addrs() .address()
.first()
.expect("You cannot start the server without binding to at least one address; qed")
} }
} }

View File

@ -17,13 +17,13 @@
use std::str; use std::str;
use std::sync::Arc; use std::sync::Arc;
use std::collections::HashMap; use std::collections::HashMap;
use rustc_hex::FromHex;
use hash_fetch::urlhint::ContractClient;
use bigint::hash::H256; use bigint::hash::H256;
use util::Address;
use bytes::{Bytes, ToPretty}; use bytes::{Bytes, ToPretty};
use hash_fetch::urlhint::{ContractClient, BoxFuture};
use parking_lot::Mutex; use parking_lot::Mutex;
use rustc_hex::FromHex;
use util::Address;
const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2";
const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000";
@ -67,7 +67,7 @@ impl ContractClient for FakeRegistrar {
Ok(REGISTRAR.parse().unwrap()) Ok(REGISTRAR.parse().unwrap())
} }
fn call(&self, address: Address, data: Bytes) -> ::futures::BoxFuture<Bytes, String> { fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
let call = (address.to_hex(), data.to_hex()); let call = (address.to_hex(), data.to_hex());
self.calls.lock().push(call.clone()); self.calls.lock().push(call.clone());
let res = self.responses.lock().get(&call).cloned().expect(&format!("No response for call: {:?}", call)); let res = self.responses.lock().get(&call).cloned().expect(&format!("No response for call: {:?}", call));

62
dapps/src/tests/home.rs Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve_ui, request, assert_security_headers};
#[test]
fn should_serve_home_js() {
// given
let server = serve_ui();
// when
let response = request(server,
"\
GET /inject.js HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
\r\n\
{}
"
);
// then
response.assert_status("HTTP/1.1 200 OK");
response.assert_header("Content-Type", "application/javascript");
assert_eq!(response.body.contains("function(){"), true, "Expected function in: {}", response.body);
assert_security_headers(&response.headers);
}
#[test]
fn should_serve_home() {
// given
let server = serve_ui();
// when
let response = request(server,
"\
GET / HTTP/1.1\r\n\
Host: 127.0.0.1:8080\r\n\
Connection: close\r\n\
\r\n\
{}
"
);
// then
response.assert_status("HTTP/1.1 200 OK");
response.assert_header("Content-Type", "text/html");
assert_security_headers(&response.headers);
}

View File

@ -20,6 +20,7 @@ mod helpers;
mod api; mod api;
mod fetch; mod fetch;
mod home;
mod redirection; mod redirection;
mod rpc; mod rpc;
mod validation; mod validation;

View File

@ -201,6 +201,7 @@ fn should_serve_utils() {
// then // then
response.assert_status("HTTP/1.1 200 OK"); response.assert_status("HTTP/1.1 200 OK");
assert_eq!(response.body.contains("function(){"), true); response.assert_header("Content-Type", "application/javascript");
assert_eq!(response.body.contains("function(){"), true, "Expected function in: {}", response.body);
assert_security_headers(&response.headers); assert_security_headers(&response.headers);
} }

View File

@ -33,7 +33,7 @@ fn should_reject_invalid_host() {
); );
// then // then
assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); response.assert_status("HTTP/1.1 403 Forbidden");
assert!(response.body.contains("Provided Host header is not whitelisted."), response.body); assert!(response.body.contains("Provided Host header is not whitelisted."), response.body);
} }
@ -54,7 +54,7 @@ fn should_allow_valid_host() {
); );
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); response.assert_status("HTTP/1.1 200 OK");
} }
#[test] #[test]
@ -74,7 +74,7 @@ fn should_serve_dapps_domains() {
); );
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); response.assert_status("HTTP/1.1 200 OK");
} }
#[test] #[test]
@ -95,5 +95,5 @@ fn should_allow_parity_utils_even_on_invalid_domain() {
); );
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); response.assert_status("HTTP/1.1 200 OK");
} }

View File

@ -1,150 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! HTTP/HTTPS URL type. Based on URL type from Iron library.
use url_lib::{self};
pub use url_lib::Host;
/// HTTP/HTTPS URL type for Iron.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct Url {
/// Raw url of url
pub raw: url_lib::Url,
/// The host field of the URL, probably a domain.
pub host: Host,
/// The connection port.
pub port: u16,
/// The URL path, the resource to be accessed.
///
/// A *non-empty* vector encoding the parts of the URL path.
/// Empty entries of `""` correspond to trailing slashes.
pub path: Vec<String>,
/// The URL query.
pub query: Option<String>,
/// The URL username field, from the userinfo section of the URL.
///
/// `None` if the `@` character was not part of the input OR
/// if a blank username was provided.
/// Otherwise, a non-empty string.
pub username: Option<String>,
/// The URL password field, from the userinfo section of the URL.
///
/// `None` if the `@` character was not part of the input OR
/// if a blank password was provided.
/// Otherwise, a non-empty string.
pub password: Option<String>,
}
impl Url {
	/// Create a URL from a string.
	///
	/// The input must be a valid URL with a special scheme for this to succeed.
	///
	/// HTTP and HTTPS are special schemes.
	///
	/// See: http://url.spec.whatwg.org/#special-scheme
	pub fn parse(input: &str) -> Result<Url, String> {
		// Delegate parsing to rust-url, stringify the error and convert the result.
		url_lib::Url::parse(input)
			.map_err(|e| format!("{}", e))
			.and_then(Url::from_generic_url)
	}

	/// Create a `Url` from a `rust-url` `Url`.
	pub fn from_generic_url(raw_url: url_lib::Url) -> Result<Url, String> {
		// A blank username means "no username".
		let username = if raw_url.username().is_empty() {
			None
		} else {
			Some(raw_url.username().to_owned())
		};

		// Likewise, a blank password maps to `None`.
		let password = raw_url.password().and_then(|pass| {
			if pass.is_empty() { None } else { Some(pass.to_owned()) }
		});

		// HTTP(S) always has a known default port, so this only fails for
		// exotic schemes.
		let port = raw_url.port_or_known_default().ok_or_else(|| format!("Unknown port for scheme: `{}`", raw_url.scheme()))?;
		let host = raw_url.host().ok_or_else(|| "Valid host, because only data:, mailto: protocols does not have host.".to_owned())?.to_owned();
		let path: Vec<String> = raw_url.path_segments()
			.ok_or_else(|| "Valid path segments. In HTTP we won't get cannot-be-a-base URLs".to_owned())?
			.map(str::to_owned)
			.collect();
		let query = raw_url.query().map(str::to_owned);

		Ok(Url {
			host,
			port,
			path,
			query,
			raw: raw_url,
			username,
			password,
		})
	}
}
#[cfg(test)]
mod test {
	use super::Url;

	// Parse a URL that is known to be valid, panicking otherwise.
	fn parse(input: &str) -> Url {
		Url::parse(input).unwrap()
	}

	#[test]
	fn test_default_port() {
		// The scheme's well-known port is filled in when none is given.
		assert_eq!(parse("http://example.com/wow").port, 80u16);
		assert_eq!(parse("https://example.com/wow").port, 443u16);
	}

	#[test]
	fn test_explicit_port() {
		// An explicit port overrides the scheme default.
		assert_eq!(parse("http://localhost:3097").port, 3097u16);
	}

	#[test]
	fn test_empty_username() {
		// Blank usernames are normalized to `None`.
		assert!(parse("http://@example.com").username.is_none());
		assert!(parse("http://:password@example.com").username.is_none());
	}

	#[test]
	fn test_not_empty_username() {
		// Non-blank usernames survive, with or without a password.
		assert_eq!(parse("http://john:pass@example.com").username.unwrap(), "john");
		assert_eq!(parse("http://john:@example.com").username.unwrap(), "john");
	}

	#[test]
	fn test_empty_password() {
		// Blank (or absent) passwords are normalized to `None`.
		assert!(parse("http://michael@example.com").password.is_none());
		assert!(parse("http://:@example.com").password.is_none());
	}

	#[test]
	fn test_not_empty_password() {
		// Non-blank passwords survive, with or without a username.
		assert_eq!(parse("http://michael:pass@example.com").password.unwrap(), "pass");
		assert_eq!(parse("http://:pass@example.com").password.unwrap(), "pass");
	}
}

View File

@ -17,26 +17,23 @@
//! Serving web-based content (proxying) //! Serving web-based content (proxying)
use std::sync::Arc; use std::sync::Arc;
use fetch::{self, Fetch};
use parity_reactor::Remote;
use base32; use base32;
use hyper::{self, server, net, Next, Encoder, Decoder}; use fetch::{self, Fetch};
use hyper::status::StatusCode; use hyper::{mime, StatusCode};
use apps; use apps;
use endpoint::{Endpoint, Handler, EndpointPath}; use endpoint::{Endpoint, EndpointPath, Request, Response};
use futures::future;
use handlers::{ use handlers::{
ContentFetcherHandler, ContentHandler, ContentValidator, ValidatorResponse, ContentFetcherHandler, ContentHandler, ContentValidator, ValidatorResponse,
StreamingHandler, extract_url, StreamingHandler,
}; };
use url::Url;
use {Embeddable, WebProxyTokens}; use {Embeddable, WebProxyTokens};
pub struct Web<F> { pub struct Web<F> {
embeddable_on: Embeddable, embeddable_on: Embeddable,
web_proxy_tokens: Arc<WebProxyTokens>, web_proxy_tokens: Arc<WebProxyTokens>,
remote: Remote,
fetch: F, fetch: F,
} }
@ -44,92 +41,27 @@ impl<F: Fetch> Web<F> {
pub fn boxed( pub fn boxed(
embeddable_on: Embeddable, embeddable_on: Embeddable,
web_proxy_tokens: Arc<WebProxyTokens>, web_proxy_tokens: Arc<WebProxyTokens>,
remote: Remote,
fetch: F, fetch: F,
) -> Box<Endpoint> { ) -> Box<Endpoint> {
Box::new(Web { Box::new(Web {
embeddable_on, embeddable_on,
web_proxy_tokens, web_proxy_tokens,
remote,
fetch, fetch,
}) })
} }
}
impl<F: Fetch> Endpoint for Web<F> { fn extract_target_url(&self, path: &EndpointPath) -> Result<String, ContentHandler> {
fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler> { let token_and_url = path.app_params.get(0)
Box::new(WebHandler {
control: control,
state: State::Initial,
path: path,
remote: self.remote.clone(),
fetch: self.fetch.clone(),
web_proxy_tokens: self.web_proxy_tokens.clone(),
embeddable_on: self.embeddable_on.clone(),
})
}
}
struct WebInstaller {
embeddable_on: Embeddable,
referer: String,
}
impl ContentValidator for WebInstaller {
type Error = String;
fn validate_and_install(&self, response: fetch::Response) -> Result<ValidatorResponse, String> {
let status = StatusCode::from_u16(response.status().to_u16());
let is_html = response.is_html();
let mime = response.content_type().unwrap_or(mime!(Text/Html));
let mut handler = StreamingHandler::new(
response,
status,
mime,
self.embeddable_on.clone(),
);
if is_html {
handler.set_initial_content(&format!(
r#"<script src="/{}/inject.js"></script><script>history.replaceState({{}}, "", "/?{}{}/{}")</script>"#,
apps::UTILS_PATH,
apps::URL_REFERER,
apps::WEB_PATH,
&self.referer,
));
}
Ok(ValidatorResponse::Streaming(handler))
}
}
enum State<F: Fetch> {
Initial,
Error(ContentHandler),
Fetching(ContentFetcherHandler<WebInstaller, F>),
}
struct WebHandler<F: Fetch> {
control: hyper::Control,
state: State<F>,
path: EndpointPath,
remote: Remote,
fetch: F,
web_proxy_tokens: Arc<WebProxyTokens>,
embeddable_on: Embeddable,
}
impl<F: Fetch> WebHandler<F> {
fn extract_target_url(&self, url: Option<Url>) -> Result<String, State<F>> {
let token_and_url = self.path.app_params.get(0)
.map(|encoded| encoded.replace('.', "")) .map(|encoded| encoded.replace('.', ""))
.and_then(|encoded| base32::decode(base32::Alphabet::Crockford, &encoded.to_uppercase())) .and_then(|encoded| base32::decode(base32::Alphabet::Crockford, &encoded.to_uppercase()))
.and_then(|data| String::from_utf8(data).ok()) .and_then(|data| String::from_utf8(data).ok())
.ok_or_else(|| State::Error(ContentHandler::error( .ok_or_else(|| ContentHandler::error(
StatusCode::BadRequest, StatusCode::BadRequest,
"Invalid parameter", "Invalid parameter",
"Couldn't parse given parameter:", "Couldn't parse given parameter:",
self.path.app_params.get(0).map(String::as_str), path.app_params.get(0).map(String::as_str),
self.embeddable_on.clone() self.embeddable_on.clone()
)))?; ))?;
let mut token_it = token_and_url.split('+'); let mut token_it = token_and_url.split('+');
let token = token_it.next(); let token = token_it.next();
@ -139,9 +71,9 @@ impl<F: Fetch> WebHandler<F> {
let domain = match token.and_then(|token| self.web_proxy_tokens.domain(token)) { let domain = match token.and_then(|token| self.web_proxy_tokens.domain(token)) {
Some(domain) => domain, Some(domain) => domain,
_ => { _ => {
return Err(State::Error(ContentHandler::error( return Err(ContentHandler::error(
StatusCode::BadRequest, "Invalid Access Token", "Invalid or old web proxy access token supplied.", Some("Try refreshing the page."), self.embeddable_on.clone() StatusCode::BadRequest, "Invalid Access Token", "Invalid or old web proxy access token supplied.", Some("Try refreshing the page."), self.embeddable_on.clone()
))); ));
} }
}; };
@ -149,95 +81,86 @@ impl<F: Fetch> WebHandler<F> {
let mut target_url = match target_url { let mut target_url = match target_url {
Some(url) if url.starts_with("http://") || url.starts_with("https://") => url.to_owned(), Some(url) if url.starts_with("http://") || url.starts_with("https://") => url.to_owned(),
_ => { _ => {
return Err(State::Error(ContentHandler::error( return Err(ContentHandler::error(
StatusCode::BadRequest, "Invalid Protocol", "Invalid protocol used.", None, self.embeddable_on.clone() StatusCode::BadRequest, "Invalid Protocol", "Invalid protocol used.", None, self.embeddable_on.clone()
))); ));
} }
}; };
if !target_url.starts_with(&*domain) { if !target_url.starts_with(&*domain) {
return Err(State::Error(ContentHandler::error( return Err(ContentHandler::error(
StatusCode::BadRequest, "Invalid Domain", "Dapp attempted to access invalid domain.", Some(&target_url), self.embeddable_on.clone(), StatusCode::BadRequest, "Invalid Domain", "Dapp attempted to access invalid domain.", Some(&target_url), self.embeddable_on.clone(),
))); ));
} }
if !target_url.ends_with("/") { if !target_url.ends_with("/") {
target_url = format!("{}/", target_url); target_url = format!("{}/", target_url);
} }
// TODO [ToDr] Should just use `path.app_params` // Skip the token
let (path, query) = match (&url, self.path.using_dapps_domains) { let query = path.query.as_ref().map_or_else(String::new, |query| format!("?{}", query));
(&Some(ref url), true) => (&url.path[..], &url.query), let path = path.app_params[1..].join("/");
(&Some(ref url), false) => (&url.path[2..], &url.query),
_ => {
return Err(State::Error(ContentHandler::error(
StatusCode::BadRequest, "Invalid URL", "Couldn't parse URL", None, self.embeddable_on.clone()
)));
}
};
let query = match *query { Ok(format!("{}{}{}", target_url, path, query))
Some(ref query) => format!("?{}", query),
None => "".into(),
};
Ok(format!("{}{}{}", target_url, path.join("/"), query))
} }
} }
impl<F: Fetch> server::Handler<net::HttpStream> for WebHandler<F> { impl<F: Fetch> Endpoint for Web<F> {
fn on_request(&mut self, request: server::Request<net::HttpStream>) -> Next { fn respond(&self, path: EndpointPath, req: Request) -> Response {
let url = extract_url(&request);
// First extract the URL (reject invalid URLs) // First extract the URL (reject invalid URLs)
let target_url = match self.extract_target_url(url) { let target_url = match self.extract_target_url(&path) {
Ok(url) => url, Ok(url) => url,
Err(error) => { Err(response) => {
self.state = error; return Box::new(future::ok(response.into()));
return Next::write();
} }
}; };
let mut handler = ContentFetcherHandler::new( let token = path.app_params.get(0)
target_url, .expect("`target_url` is valid; app_params is not empty;qed")
self.path.clone(), .to_owned();
self.control.clone(),
Box::new(ContentFetcherHandler::new(
req.method(),
&target_url,
path,
WebInstaller { WebInstaller {
embeddable_on: self.embeddable_on.clone(), embeddable_on: self.embeddable_on.clone(),
referer: self.path.app_params.get(0) token,
.expect("`target_url` is valid; app_params is not empty;qed")
.to_owned(),
}, },
self.embeddable_on.clone(), self.embeddable_on.clone(),
self.remote.clone(),
self.fetch.clone(), self.fetch.clone(),
); ))
let res = handler.on_request(request);
self.state = State::Fetching(handler);
res
}
fn on_request_readable(&mut self, decoder: &mut Decoder<net::HttpStream>) -> Next {
match self.state {
State::Initial => Next::end(),
State::Error(ref mut handler) => handler.on_request_readable(decoder),
State::Fetching(ref mut handler) => handler.on_request_readable(decoder),
}
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
match self.state {
State::Initial => Next::end(),
State::Error(ref mut handler) => handler.on_response(res),
State::Fetching(ref mut handler) => handler.on_response(res),
}
}
fn on_response_writable(&mut self, encoder: &mut Encoder<net::HttpStream>) -> Next {
match self.state {
State::Initial => Next::end(),
State::Error(ref mut handler) => handler.on_response_writable(encoder),
State::Fetching(ref mut handler) => handler.on_response_writable(encoder),
}
} }
} }
struct WebInstaller {
embeddable_on: Embeddable,
token: String,
}
impl ContentValidator for WebInstaller {
type Error = String;
fn validate_and_install(self, response: fetch::Response) -> Result<ValidatorResponse, String> {
let status = response.status();
let is_html = response.is_html();
let mime = response.content_type().unwrap_or(mime::TEXT_HTML);
let mut handler = StreamingHandler::new(
response,
status,
mime,
self.embeddable_on,
);
if is_html {
handler.set_initial_content(&format!(
r#"<script src="/{}/inject.js"></script><script>history.replaceState({{}}, "", "/?{}{}/{}")</script>"#,
apps::UTILS_PATH,
apps::URL_REFERER,
apps::WEB_PATH,
&self.token,
));
}
Ok(ValidatorResponse::Streaming(handler))
}
}

View File

@ -12,15 +12,12 @@ build = "build.rs"
[dependencies] [dependencies]
ansi_term = "0.9" ansi_term = "0.9"
bit-set = "0.4"
bloomchain = "0.1" bloomchain = "0.1"
bn = { git = "https://github.com/paritytech/bn" } bn = { git = "https://github.com/paritytech/bn" }
byteorder = "1.0" byteorder = "1.0"
clippy = { version = "0.0.103", optional = true} clippy = { version = "0.0.103", optional = true}
common-types = { path = "types" } common-types = { path = "types" }
crossbeam = "0.2.9" crossbeam = "0.2.9"
env_logger = "0.4"
ethabi = "2.0"
ethash = { path = "../ethash" } ethash = { path = "../ethash" }
ethcore-bloom-journal = { path = "../util/bloom" } ethcore-bloom-journal = { path = "../util/bloom" }
ethcore-bytes = { path = "../util/bytes" } ethcore-bytes = { path = "../util/bytes" }
@ -45,9 +42,9 @@ heapsize = "0.4"
hyper = { git = "https://github.com/paritytech/hyper", default-features = false } hyper = { git = "https://github.com/paritytech/hyper", default-features = false }
itertools = "0.5" itertools = "0.5"
lazy_static = "0.2" lazy_static = "0.2"
linked-hash-map = "0.3.0" linked-hash-map = "0.5"
log = "0.3" log = "0.3"
lru-cache = "0.1.0" lru-cache = "0.1"
native-contracts = { path = "native_contracts" } native-contracts = { path = "native_contracts" }
num = "0.1" num = "0.1"
num_cpus = "1.2" num_cpus = "1.2"
@ -60,7 +57,6 @@ rlp = { path = "../util/rlp" }
rlp_derive = { path = "../util/rlp_derive" } rlp_derive = { path = "../util/rlp_derive" }
rust-crypto = "0.2.34" rust-crypto = "0.2.34"
rustc-hex = "1.0" rustc-hex = "1.0"
semver = "0.6"
stats = { path = "../util/stats" } stats = { path = "../util/stats" }
time = "0.1" time = "0.1"
transient-hashmap = "0.4" transient-hashmap = "0.4"

View File

@ -46,10 +46,12 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
Ok(format!(r##" Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}}; use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, IntoFuture, BoxFuture}}; use futures::{{future, Future, IntoFuture}};
use ethabi::{{Contract, Interface, Token, Event}}; use ethabi::{{Contract, Interface, Token, Event}};
use bigint; use bigint;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
/// Generated Rust bindings to an Ethereum contract. /// Generated Rust bindings to an Ethereum contract.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct {name} {{ pub struct {name} {{
@ -118,15 +120,14 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
let call_future = match function.encode_call({to_tokens}) {{ let call_future = match function.encode_call({to_tokens}) {{
Ok(call_data) => (call)(call_addr, call_data), Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return future::err(format!("Error encoding call: {{:?}}", e)).boxed(), Err(e) => return Box::new(future::err(format!("Error encoding call: {{:?}}", e))),
}}; }};
call_future Box::new(call_future
.into_future() .into_future()
.and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e))) .and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e)))
.map(Vec::into_iter) .map(Vec::into_iter)
.and_then(|mut outputs| {decode_outputs}) .and_then(|mut outputs| {decode_outputs}))
.boxed()
}} }}
"##, "##,
abi_name = name, abi_name = name,

View File

@ -164,8 +164,6 @@ mod tests {
#[test] #[test]
fn uses_current_set() { fn uses_current_set() {
let _ = ::env_logger::init();
let tap = Arc::new(AccountProvider::transient_provider()); let tap = Arc::new(AccountProvider::transient_provider());
let s0: Secret = keccak("0").into(); let s0: Secret = keccak("0").into();
let v0 = tap.insert_account(s0.clone(), "").unwrap(); let v0 = tap.insert_account(s0.clone(), "").unwrap();

View File

@ -71,15 +71,12 @@
//! cargo build --release //! cargo build --release
//! ``` //! ```
extern crate bit_set;
extern crate bloomchain; extern crate bloomchain;
extern crate bn; extern crate bn;
extern crate byteorder; extern crate byteorder;
extern crate crossbeam; extern crate crossbeam;
extern crate common_types as types; extern crate common_types as types;
extern crate crypto; extern crate crypto;
extern crate env_logger;
extern crate ethabi;
extern crate ethash; extern crate ethash;
extern crate ethcore_bloom_journal as bloom_journal; extern crate ethcore_bloom_journal as bloom_journal;
extern crate ethcore_devtools as devtools; extern crate ethcore_devtools as devtools;
@ -119,7 +116,6 @@ extern crate unexpected;
#[macro_use] #[macro_use]
extern crate rlp_derive; extern crate rlp_derive;
extern crate rustc_hex; extern crate rustc_hex;
extern crate semver;
extern crate stats; extern crate stats;
extern crate time; extern crate time;
extern crate transient_hashmap; extern crate transient_hashmap;

View File

@ -7,14 +7,12 @@ version = "1.8.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
ethabi = "2.0"
futures = "0.1" futures = "0.1"
log = "0.3" log = "0.3"
mime = "0.2" mime = "0.3"
mime_guess = "1.6.1" mime_guess = "2.0.0-alpha.2"
rand = "0.3" rand = "0.3"
rustc-hex = "1.0" rustc-hex = "1.0"
parking_lot = "0.4"
fetch = { path = "../util/fetch" } fetch = { path = "../util/fetch" }
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore-bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }
@ -22,3 +20,7 @@ ethcore-bytes = { path = "../util/bytes" }
parity-reactor = { path = "../util/reactor" } parity-reactor = { path = "../util/reactor" }
native-contracts = { path = "../ethcore/native_contracts" } native-contracts = { path = "../ethcore/native_contracts" }
hash = { path = "../util/hash" } hash = { path = "../util/hash" }
[dev-dependencies]
ethabi = "2.0"
parking_lot = "0.4"

View File

@ -20,24 +20,26 @@
#[macro_use] #[macro_use]
extern crate log; extern crate log;
#[macro_use]
extern crate mime;
extern crate ethabi;
extern crate ethcore_util as util; extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint; extern crate ethcore_bigint as bigint;
extern crate ethcore_bytes as bytes; extern crate ethcore_bytes as bytes;
extern crate futures; extern crate futures;
extern crate hash;
extern crate mime;
extern crate mime_guess; extern crate mime_guess;
extern crate native_contracts; extern crate native_contracts;
extern crate parity_reactor; extern crate parity_reactor;
extern crate parking_lot;
extern crate rand; extern crate rand;
extern crate rustc_hex; extern crate rustc_hex;
extern crate hash;
pub extern crate fetch; pub extern crate fetch;
#[cfg(test)]
extern crate parking_lot;
#[cfg(test)]
extern crate ethabi;
mod client; mod client;
pub mod urlhint; pub mod urlhint;

View File

@ -18,15 +18,19 @@
use std::sync::Arc; use std::sync::Arc;
use rustc_hex::ToHex; use rustc_hex::ToHex;
use mime::Mime; use mime::{self, Mime};
use mime_guess; use mime_guess;
use hash::keccak; use hash::keccak;
use futures::{future, BoxFuture, Future}; use futures::{future, Future};
use native_contracts::{Registry, Urlhint}; use native_contracts::{Registry, Urlhint};
use util::Address; use util::Address;
use bytes::Bytes; use bytes::Bytes;
/// Boxed future that can be shared between threads.
/// TODO [ToDr] Use concrete types!
pub type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
const COMMIT_LEN: usize = 20; const COMMIT_LEN: usize = 20;
/// RAW Contract interface. /// RAW Contract interface.
@ -127,7 +131,7 @@ fn decode_urlhint_output(output: (String, ::bigint::hash::H160, Address)) -> Opt
let commit = GithubApp::commit(&commit); let commit = GithubApp::commit(&commit);
if commit == Some(Default::default()) { if commit == Some(Default::default()) {
let mime = guess_mime_type(&account_slash_repo).unwrap_or(mime!(Application/_)); let mime = guess_mime_type(&account_slash_repo).unwrap_or(mime::APPLICATION_JSON);
return Some(URLHintResult::Content(Content { return Some(URLHintResult::Content(Content {
url: account_slash_repo, url: account_slash_repo,
mime: mime, mime: mime,
@ -158,7 +162,8 @@ impl URLHint for URLHintContract {
let do_call = |_, data| { let do_call = |_, data| {
let addr = match self.client.registrar() { let addr = match self.client.registrar() {
Ok(addr) => addr, Ok(addr) => addr,
Err(e) => return future::err(e).boxed(), Err(e) => return Box::new(future::err(e))
as BoxFuture<Vec<u8>, _>,
}; };
self.client.call(addr, data) self.client.call(addr, data)
@ -166,7 +171,7 @@ impl URLHint for URLHintContract {
let urlhint = self.urlhint.clone(); let urlhint = self.urlhint.clone();
let client = self.client.clone(); let client = self.client.clone();
self.registrar.get_address(do_call, keccak("githubhint"), "A".into()) Box::new(self.registrar.get_address(do_call, keccak("githubhint"), "A".into())
.map(|addr| if addr == Address::default() { None } else { Some(addr) }) .map(|addr| if addr == Address::default() { None } else { Some(addr) })
.and_then(move |address| { .and_then(move |address| {
let mut fixed_id = [0; 32]; let mut fixed_id = [0; 32];
@ -180,7 +185,7 @@ impl URLHint for URLHintContract {
Either::B(urlhint.entries(do_call, ::bigint::hash::H256(fixed_id)).map(decode_urlhint_output)) Either::B(urlhint.entries(do_call, ::bigint::hash::H256(fixed_id)).map(decode_urlhint_output))
} }
} }
}).boxed() }))
} }
} }
@ -213,7 +218,7 @@ pub mod tests {
use std::str::FromStr; use std::str::FromStr;
use rustc_hex::FromHex; use rustc_hex::FromHex;
use futures::{BoxFuture, Future, IntoFuture}; use futures::{Future, IntoFuture};
use super::*; use super::*;
use super::guess_mime_type; use super::guess_mime_type;
@ -251,7 +256,7 @@ pub mod tests {
fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> { fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
self.calls.lock().push((address.to_hex(), data.to_hex())); self.calls.lock().push((address.to_hex(), data.to_hex()));
let res = self.responses.lock().remove(0); let res = self.responses.lock().remove(0);
res.into_future().boxed() Box::new(res.into_future())
} }
} }
@ -326,7 +331,7 @@ pub mod tests {
// then // then
assert_eq!(res, Some(URLHintResult::Content(Content { assert_eq!(res, Some(URLHintResult::Content(Content {
url: "https://parity.io/assets/images/ethcore-black-horizontal.png".into(), url: "https://parity.io/assets/images/ethcore-black-horizontal.png".into(),
mime: mime!(Image/Png), mime: mime::IMAGE_PNG,
owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(),
}))) })))
} }
@ -358,9 +363,9 @@ pub mod tests {
assert_eq!(guess_mime_type(url1), None); assert_eq!(guess_mime_type(url1), None);
assert_eq!(guess_mime_type(url2), Some(mime!(Image/Png))); assert_eq!(guess_mime_type(url2), Some(mime::IMAGE_PNG));
assert_eq!(guess_mime_type(url3), Some(mime!(Image/Png))); assert_eq!(guess_mime_type(url3), Some(mime::IMAGE_PNG));
assert_eq!(guess_mime_type(url4), Some(mime!(Image/Jpeg))); assert_eq!(guess_mime_type(url4), Some(mime::IMAGE_JPEG));
assert_eq!(guess_mime_type(url5), Some(mime!(Image/Png))); assert_eq!(guess_mime_type(url5), Some(mime::IMAGE_PNG));
} }
} }

View File

@ -235,7 +235,7 @@ impl Manager {
where F: Fn() -> Result<R, &'static str> where F: Fn() -> Result<R, &'static str>
{ {
let mut err = Error::KeyNotFound; let mut err = Error::KeyNotFound;
/// Try to open device a few times. // Try to open device a few times.
for _ in 0..10 { for _ in 0..10 {
match f() { match f() {
Ok(handle) => return Ok(handle), Ok(handle) => return Ok(handle),

View File

@ -10,8 +10,9 @@ ethcore = { path = "../ethcore" }
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore-bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }
ethcore-bytes = { path = "../util/bytes" } ethcore-bytes = { path = "../util/bytes" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
rlp = { path = "../util/rlp" } rlp = { path = "../util/rlp" }
mime = "0.2"
cid = "0.2" cid = "0.2"
multihash = "0.6" multihash = "0.6"
unicase = "2.0"

View File

@ -14,43 +14,40 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[macro_use]
extern crate mime;
extern crate multihash; extern crate multihash;
extern crate cid; extern crate cid;
extern crate unicase;
extern crate rlp; extern crate rlp;
extern crate ethcore; extern crate ethcore;
extern crate ethcore_util as util; extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint; extern crate ethcore_bigint as bigint;
extern crate ethcore_bytes as bytes; extern crate ethcore_bytes as bytes;
extern crate jsonrpc_core as core;
extern crate jsonrpc_http_server as http; extern crate jsonrpc_http_server as http;
pub mod error; pub mod error;
mod route; mod route;
use std::io::Write; use std::thread;
use std::sync::Arc; use std::sync::{mpsc, Arc};
use std::net::{SocketAddr, IpAddr}; use std::net::{SocketAddr, IpAddr};
use core::futures::future::{self, FutureResult};
use core::futures::{self, Future};
use ethcore::client::BlockChainClient;
use http::hyper::header::{self, Vary, ContentType};
use http::hyper::{Method, StatusCode};
use http::hyper::{self, server};
use unicase::Ascii;
use error::ServerError; use error::ServerError;
use route::Out; use route::Out;
use http::hyper::server::{Handler, Request, Response};
use http::hyper::net::HttpStream;
use http::hyper::header::{self, Vary, ContentLength, ContentType};
use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode};
use ethcore::client::BlockChainClient;
pub use http::hyper::server::Listening;
pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; pub use http::{AccessControlAllowOrigin, Host, DomainsValidation};
/// Request/response handler /// Request/response handler
pub struct IpfsHandler { pub struct IpfsHandler {
/// Response to send out
out: Out,
/// How many bytes from the response have been written
out_progress: usize,
/// CORS response header
cors_header: Option<header::AccessControlAllowOrigin>,
/// Allowed CORS domains /// Allowed CORS domains
cors_domains: Option<Vec<AccessControlAllowOrigin>>, cors_domains: Option<Vec<AccessControlAllowOrigin>>,
/// Hostnames allowed in the `Host` request header /// Hostnames allowed in the `Host` request header
@ -66,124 +63,68 @@ impl IpfsHandler {
pub fn new(cors: DomainsValidation<AccessControlAllowOrigin>, hosts: DomainsValidation<Host>, client: Arc<BlockChainClient>) -> Self { pub fn new(cors: DomainsValidation<AccessControlAllowOrigin>, hosts: DomainsValidation<Host>, client: Arc<BlockChainClient>) -> Self {
IpfsHandler { IpfsHandler {
out: Out::Bad("Invalid Request"),
out_progress: 0,
cors_header: None,
cors_domains: cors.into(), cors_domains: cors.into(),
allowed_hosts: hosts.into(), allowed_hosts: hosts.into(),
client: client, client: client,
} }
} }
} pub fn on_request(&self, req: hyper::Request) -> (Option<header::AccessControlAllowOrigin>, Out) {
/// Implement Hyper's HTTP handler
impl Handler<HttpStream> for IpfsHandler {
fn on_request(&mut self, req: Request<HttpStream>) -> Next {
match *req.method() { match *req.method() {
Method::Get | Method::Post => {}, Method::Get | Method::Post => {},
_ => return Next::write() _ => return (None, Out::Bad("Invalid Request")),
} }
if !http::is_host_allowed(&req, &self.allowed_hosts) { if !http::is_host_allowed(&req, &self.allowed_hosts) {
self.out = Out::Bad("Disallowed Host header"); return (None, Out::Bad("Disallowed Host header"));
return Next::write();
} }
let cors_header = http::cors_header(&req, &self.cors_domains); let cors_header = http::cors_header(&req, &self.cors_domains);
if cors_header == http::CorsHeader::Invalid { if cors_header == http::CorsHeader::Invalid {
self.out = Out::Bad("Disallowed Origin header"); return (None, Out::Bad("Disallowed Origin header"));
return Next::write();
}
self.cors_header = cors_header.into();
let (path, query) = match *req.uri() {
RequestUri::AbsolutePath { ref path, ref query } => (path, query.as_ref().map(AsRef::as_ref)),
_ => return Next::write(),
};
self.out = self.route(path, query);
Next::write()
}
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
Next::write()
}
fn on_response(&mut self, res: &mut Response) -> Next {
use Out::*;
match self.out {
OctetStream(ref bytes) => {
use mime::{Mime, TopLevel, SubLevel};
// `OctetStream` is not a valid variant, so need to construct
// the type manually.
let content_type = Mime(
TopLevel::Application,
SubLevel::Ext("octet-stream".into()),
vec![]
);
res.headers_mut().set(ContentLength(bytes.len() as u64));
res.headers_mut().set(ContentType(content_type));
},
NotFound(reason) => {
res.set_status(StatusCode::NotFound);
res.headers_mut().set(ContentLength(reason.len() as u64));
res.headers_mut().set(ContentType(mime!(Text/Plain)));
},
Bad(reason) => {
res.set_status(StatusCode::BadRequest);
res.headers_mut().set(ContentLength(reason.len() as u64));
res.headers_mut().set(ContentType(mime!(Text/Plain)));
}
} }
if let Some(cors_header) = self.cors_header.take() { let path = req.uri().path();
res.headers_mut().set(cors_header); let query = req.uri().query();
res.headers_mut().set(Vary::Items(vec!["Origin".into()])); return (cors_header.into(), self.route(path, query));
}
Next::write()
}
fn on_response_writable(&mut self, transport: &mut Encoder<HttpStream>) -> Next {
use Out::*;
// Get the data to write as a byte slice
let data = match self.out {
OctetStream(ref bytes) => &bytes,
NotFound(reason) | Bad(reason) => reason.as_bytes(),
};
write_chunk(transport, &mut self.out_progress, data)
} }
} }
/// Attempt to write entire `data` from current `progress` impl server::Service for IpfsHandler {
fn write_chunk<W: Write>(transport: &mut W, progress: &mut usize, data: &[u8]) -> Next { type Request = hyper::Request;
// Skip any bytes that have already been written type Response = hyper::Response;
let chunk = &data[*progress..]; type Error = hyper::Error;
type Future = FutureResult<hyper::Response, hyper::Error>;
// Write an get the amount of bytes written. End the connection in case of an error. fn call(&self, request: Self::Request) -> Self::Future {
let written = match transport.write(chunk) { let (cors_header, out) = self.on_request(request);
Ok(written) => written,
Err(_) => return Next::end(),
};
*progress += written; let mut res = match out {
Out::OctetStream(bytes) => {
hyper::Response::new()
.with_status(StatusCode::Ok)
.with_header(ContentType::octet_stream())
.with_body(bytes)
},
Out::NotFound(reason) => {
hyper::Response::new()
.with_status(StatusCode::NotFound)
.with_header(ContentType::plaintext())
.with_body(reason)
},
Out::Bad(reason) => {
hyper::Response::new()
.with_status(StatusCode::BadRequest)
.with_header(ContentType::plaintext())
.with_body(reason)
}
};
// Close the connection if the entire remaining chunk has been written if let Some(cors_header) = cors_header {
if written < chunk.len() { res.headers_mut().set(cors_header);
Next::write() res.headers_mut().set(Vary::Items(vec![Ascii::new("Origin".into())]));
} else { }
Next::end()
future::ok(res)
} }
} }
@ -197,6 +138,19 @@ fn include_current_interface(mut hosts: Vec<Host>, interface: String, port: u16)
hosts hosts
} }
#[derive(Debug)]
pub struct Listening {
close: Option<futures::sync::oneshot::Sender<()>>,
thread: Option<thread::JoinHandle<()>>,
}
impl Drop for Listening {
fn drop(&mut self) {
self.close.take().unwrap().send(()).unwrap();
let _ = self.thread.take().unwrap().join();
}
}
pub fn start_server( pub fn start_server(
port: u16, port: u16,
interface: String, interface: String,
@ -210,67 +164,31 @@ pub fn start_server(
let hosts: Option<Vec<_>> = hosts.into(); let hosts: Option<Vec<_>> = hosts.into();
let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into(); let hosts: DomainsValidation<_> = hosts.map(move |hosts| include_current_interface(hosts, interface, port)).into();
Ok( let (close, shutdown_signal) = futures::sync::oneshot::channel::<()>();
http::hyper::Server::http(&addr)? let (tx, rx) = mpsc::sync_channel(1);
.handle(move |_| IpfsHandler::new(cors.clone(), hosts.clone(), client.clone())) let thread = thread::spawn(move || {
.map(|(listening, srv)| { let send = |res| tx.send(res).expect("rx end is never dropped; qed");
let server = match server::Http::new().bind(&addr, move || {
Ok(IpfsHandler::new(cors.clone(), hosts.clone(), client.clone()))
}) {
Ok(server) => {
send(Ok(()));
server
},
Err(err) => {
send(Err(err));
return;
}
};
::std::thread::spawn(move || { let _ = server.run_until(shutdown_signal.map_err(|_| {}));
srv.run(); });
});
listening // Wait for server to start successfuly.
})? rx.recv().expect("tx end is never dropped; qed")?;
)
} Ok(Listening {
close: close.into(),
#[cfg(test)] thread: thread.into(),
mod tests { })
use super::*;
#[test]
fn write_chunk_to_vec() {
let mut transport = Vec::new();
let mut progress = 0;
let _ = write_chunk(&mut transport, &mut progress, b"foobar");
assert_eq!(b"foobar".to_vec(), transport);
assert_eq!(6, progress);
}
#[test]
fn write_chunk_to_vec_part() {
let mut transport = Vec::new();
let mut progress = 3;
let _ = write_chunk(&mut transport, &mut progress, b"foobar");
assert_eq!(b"bar".to_vec(), transport);
assert_eq!(6, progress);
}
#[test]
fn write_chunk_to_array() {
use std::io::Cursor;
let mut buf = [0u8; 3];
let mut progress = 0;
{
let mut transport: Cursor<&mut [u8]> = Cursor::new(&mut buf);
let _ = write_chunk(&mut transport, &mut progress, b"foobar");
}
assert_eq!(*b"foo", buf);
assert_eq!(3, progress);
{
let mut transport: Cursor<&mut [u8]> = Cursor::new(&mut buf);
let _ = write_chunk(&mut transport, &mut progress, b"foobar");
}
assert_eq!(*b"bar", buf);
assert_eq!(6, progress);
}
} }

View File

@ -12,8 +12,8 @@ use-precompiled-js = ["parity-dapps-glue/use-precompiled-js"]
with-syntex = ["parity-dapps-glue/with-syntex"] with-syntex = ["parity-dapps-glue/with-syntex"]
[build-dependencies] [build-dependencies]
parity-dapps-glue = "1.7" parity-dapps-glue = "1.8"
[dependencies] [dependencies]
parity-dapps-glue = "1.7" parity-dapps-glue = "1.8"

View File

@ -11,8 +11,8 @@ default = ["with-syntex"]
with-syntex = ["parity-dapps-glue/with-syntex"] with-syntex = ["parity-dapps-glue/with-syntex"]
[build-dependencies] [build-dependencies]
parity-dapps-glue = "1.7" parity-dapps-glue = "1.8"
[dependencies] [dependencies]
parity-dapps-glue = "1.7" parity-dapps-glue = "1.8"

View File

@ -70,6 +70,7 @@ pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
// Disable info logging by default for some modules: // Disable info logging by default for some modules:
builder.filter(Some("ws"), LogLevelFilter::Warn); builder.filter(Some("ws"), LogLevelFilter::Warn);
builder.filter(Some("reqwest"), LogLevelFilter::Warn); builder.filter(Some("reqwest"), LogLevelFilter::Warn);
builder.filter(Some("hyper"), LogLevelFilter::Warn);
builder.filter(Some("rustls"), LogLevelFilter::Warn); builder.filter(Some("rustls"), LogLevelFilter::Warn);
// Enable info for others. // Enable info for others.
builder.filter(None, LogLevelFilter::Info); builder.filter(None, LogLevelFilter::Info);

View File

@ -480,7 +480,7 @@ usage! {
ARG arg_jsonrpc_server_threads: (Option<usize>) = None, or |c: &Config| otry!(c.rpc).server_threads, ARG arg_jsonrpc_server_threads: (Option<usize>) = None, or |c: &Config| otry!(c.rpc).server_threads,
"--jsonrpc-server-threads=[NUM]", "--jsonrpc-server-threads=[NUM]",
"Enables experimental faster implementation of JSON-RPC server. Requires Dapps server to be disabled using --no-dapps.", "Enables multiple threads handling incoming connections for HTTP JSON-RPC server.",
["API and console options WebSockets"] ["API and console options WebSockets"]
FLAG flag_no_ws: (bool) = false, or |c: &Config| otry!(c.websockets).disable.clone(), FLAG flag_no_ws: (bool) = false, or |c: &Config| otry!(c.websockets).disable.clone(),

View File

@ -141,16 +141,11 @@ impl Configuration {
} }
let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive);
let geth_compatibility = self.args.flag_geth; let geth_compatibility = self.args.flag_geth;
let mut dapps_conf = self.dapps_config(); let dapps_conf = self.dapps_config();
let ipfs_conf = self.ipfs_config(); let ipfs_conf = self.ipfs_config();
let secretstore_conf = self.secretstore_config()?; let secretstore_conf = self.secretstore_config()?;
let format = self.format()?; let format = self.format()?;
if self.args.arg_jsonrpc_server_threads.is_some() && dapps_conf.enabled {
dapps_conf.enabled = false;
writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr.");
}
let cmd = if self.args.flag_version { let cmd = if self.args.flag_version {
Cmd::Version Cmd::Version
} else if self.args.cmd_signer { } else if self.args.cmd_signer {
@ -867,9 +862,8 @@ impl Configuration {
hosts: self.rpc_hosts(), hosts: self.rpc_hosts(),
cors: self.rpc_cors(), cors: self.rpc_cors(),
server_threads: match self.args.arg_jsonrpc_server_threads { server_threads: match self.args.arg_jsonrpc_server_threads {
Some(threads) if threads > 0 => Some(threads), Some(threads) if threads > 0 => threads,
None => None, _ => 1,
_ => return Err("--jsonrpc-server-threads number needs to be positive.".into()),
}, },
processing_threads: self.args.arg_jsonrpc_threads, processing_threads: self.args.arg_jsonrpc_threads,
}; };

View File

@ -21,7 +21,8 @@ use dir::default_data_path;
use ethcore::client::{Client, BlockChainClient, BlockId}; use ethcore::client::{Client, BlockChainClient, BlockId};
use ethcore::transaction::{Transaction, Action}; use ethcore::transaction::{Transaction, Action};
use ethsync::LightSync; use ethsync::LightSync;
use futures::{future, IntoFuture, Future, BoxFuture}; use futures::{future, IntoFuture, Future};
use jsonrpc_core::BoxFuture;
use hash_fetch::fetch::Client as FetchClient; use hash_fetch::fetch::Client as FetchClient;
use hash_fetch::urlhint::ContractClient; use hash_fetch::urlhint::ContractClient;
use helpers::replace_home; use helpers::replace_home;
@ -30,7 +31,6 @@ use light::on_demand::{self, OnDemand};
use node_health::{SyncStatus, NodeHealth}; use node_health::{SyncStatus, NodeHealth};
use rpc; use rpc;
use rpc_apis::SignerService; use rpc_apis::SignerService;
use parity_reactor;
use util::Address; use util::Address;
use bytes::Bytes; use bytes::Bytes;
@ -81,9 +81,8 @@ impl ContractClient for FullRegistrar {
} }
fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> { fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
self.client.call_contract(BlockId::Latest, address, data) Box::new(self.client.call_contract(BlockId::Latest, address, data)
.into_future() .into_future())
.boxed()
} }
} }
@ -113,7 +112,7 @@ impl<T: LightChainClient + 'static> ContractClient for LightRegistrar<T> {
let env_info = match env_info { let env_info = match env_info {
Ok(x) => x, Ok(x) => x,
Err(e) => return future::err(e).boxed(), Err(e) => return Box::new(future::err(e)),
}; };
let maybe_future = self.sync.with_context(move |ctx| { let maybe_future = self.sync.with_context(move |ctx| {
@ -140,8 +139,8 @@ impl<T: LightChainClient + 'static> ContractClient for LightRegistrar<T> {
}); });
match maybe_future { match maybe_future {
Some(fut) => fut.boxed(), Some(fut) => Box::new(fut),
None => future::err("cannot query registry: network disabled".into()).boxed(), None => Box::new(future::err("cannot query registry: network disabled".into())),
} }
} }
} }
@ -153,7 +152,6 @@ pub struct Dependencies {
pub node_health: NodeHealth, pub node_health: NodeHealth,
pub sync_status: Arc<SyncStatus>, pub sync_status: Arc<SyncStatus>,
pub contract_client: Arc<ContractClient>, pub contract_client: Arc<ContractClient>,
pub remote: parity_reactor::TokioRemote,
pub fetch: FetchClient, pub fetch: FetchClient,
pub signer: Arc<SignerService>, pub signer: Arc<SignerService>,
pub ui_address: Option<(String, u16)>, pub ui_address: Option<(String, u16)>,
@ -235,7 +233,6 @@ mod server {
use rpc_apis; use rpc_apis;
use parity_dapps; use parity_dapps;
use parity_reactor;
pub use parity_dapps::Middleware; pub use parity_dapps::Middleware;
@ -248,12 +245,11 @@ mod server {
extra_script_src: Vec<(String, u16)>, extra_script_src: Vec<(String, u16)>,
) -> Result<Middleware, String> { ) -> Result<Middleware, String> {
let signer = deps.signer; let signer = deps.signer;
let parity_remote = parity_reactor::Remote::new(deps.remote.clone());
let web_proxy_tokens = Arc::new(move |token| signer.web_proxy_access_token_domain(&token)); let web_proxy_tokens = Arc::new(move |token| signer.web_proxy_access_token_domain(&token));
Ok(parity_dapps::Middleware::dapps( Ok(parity_dapps::Middleware::dapps(
deps.fetch.pool(),
deps.node_health, deps.node_health,
parity_remote,
deps.ui_address, deps.ui_address,
extra_embed_on, extra_embed_on,
extra_script_src, extra_script_src,
@ -271,10 +267,9 @@ mod server {
deps: Dependencies, deps: Dependencies,
dapps_domain: &str, dapps_domain: &str,
) -> Result<Middleware, String> { ) -> Result<Middleware, String> {
let parity_remote = parity_reactor::Remote::new(deps.remote.clone());
Ok(parity_dapps::Middleware::ui( Ok(parity_dapps::Middleware::ui(
deps.fetch.pool(),
deps.node_health, deps.node_health,
parity_remote,
dapps_domain, dapps_domain,
deps.contract_client, deps.contract_client,
deps.sync_status, deps.sync_status,

View File

@ -23,7 +23,8 @@ use ethcore::machine::EthereumMachine;
use ethcore::receipt::Receipt; use ethcore::receipt::Receipt;
use ethsync::LightSync; use ethsync::LightSync;
use futures::{future, Future, BoxFuture}; use futures::{future, Future};
use futures::future::Either;
use light::client::fetch::ChainDataFetcher; use light::client::fetch::ChainDataFetcher;
use light::on_demand::{request, OnDemand}; use light::on_demand::{request, OnDemand};
@ -33,6 +34,8 @@ use bigint::hash::H256;
const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed"; const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed";
type BoxFuture<T, E> = Box<Future<Item = T, Error = E>>;
/// Allows on-demand fetch of data useful for the light client. /// Allows on-demand fetch of data useful for the light client.
pub struct EpochFetch { pub struct EpochFetch {
/// A handle to the sync service. /// A handle to the sync service.
@ -45,7 +48,7 @@ impl EpochFetch {
fn request<T>(&self, req: T) -> BoxFuture<T::Out, &'static str> fn request<T>(&self, req: T) -> BoxFuture<T::Out, &'static str>
where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static
{ {
match self.sync.read().upgrade() { Box::new(match self.sync.read().upgrade() {
Some(sync) => { Some(sync) => {
let on_demand = &self.on_demand; let on_demand = &self.on_demand;
let maybe_future = sync.with_context(move |ctx| { let maybe_future = sync.with_context(move |ctx| {
@ -53,12 +56,12 @@ impl EpochFetch {
}); });
match maybe_future { match maybe_future {
Some(x) => x.map_err(|_| "Request canceled").boxed(), Some(x) => Either::A(x.map_err(|_| "Request canceled")),
None => future::err("Unable to access network.").boxed(), None => Either::B(future::err("Unable to access network.")),
} }
} }
None => future::err("Unable to access network").boxed(), None => Either::B(future::err("Unable to access network")),
} })
} }
} }

View File

@ -94,11 +94,11 @@ impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T>
}); });
match maybe_fetching { match maybe_fetching {
Some(fut) => fut.boxed(), Some(fut) => future::Either::A(fut),
None => { None => {
debug!(target: "cull", "Unable to acquire network context; qed"); debug!(target: "cull", "Unable to acquire network context; qed");
future::ok(()).boxed() future::Either::B(future::ok(()))
} },
} }
}, Duration::from_millis(PURGE_TIMEOUT_MS), || {}) }, Duration::from_millis(PURGE_TIMEOUT_MS), || {})
} }

View File

@ -42,7 +42,7 @@ pub struct HttpConfiguration {
pub apis: ApiSet, pub apis: ApiSet,
pub cors: Option<Vec<String>>, pub cors: Option<Vec<String>>,
pub hosts: Option<Vec<String>>, pub hosts: Option<Vec<String>>,
pub server_threads: Option<usize>, pub server_threads: usize,
pub processing_threads: usize, pub processing_threads: usize,
} }
@ -61,7 +61,7 @@ impl Default for HttpConfiguration {
apis: ApiSet::UnsafeContext, apis: ApiSet::UnsafeContext,
cors: None, cors: None,
hosts: Some(Vec::new()), hosts: Some(Vec::new()),
server_threads: None, server_threads: 1,
processing_threads: 0, processing_threads: 0,
} }
} }
@ -100,7 +100,7 @@ impl From<UiConfiguration> for HttpConfiguration {
apis: rpc_apis::ApiSet::UnsafeContext, apis: rpc_apis::ApiSet::UnsafeContext,
cors: None, cors: None,
hosts: conf.hosts, hosts: conf.hosts,
server_threads: None, server_threads: 1,
processing_threads: 0, processing_threads: 0,
} }
} }
@ -278,13 +278,8 @@ pub fn new_http<D: rpc_apis::Dependencies>(
handler, handler,
remote, remote,
rpc::RpcExtractor, rpc::RpcExtractor,
match (conf.server_threads, middleware) { middleware,
(Some(threads), None) => rpc::HttpSettings::Threads(threads), conf.server_threads,
(None, middleware) => rpc::HttpSettings::Dapps(middleware),
(Some(_), Some(_)) => {
return Err("Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into())
},
}
); );
match start_result { match start_result {

View File

@ -327,7 +327,6 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
sync_status, sync_status,
node_health, node_health,
contract_client: contract_client, contract_client: contract_client,
remote: event_loop.raw_remote(),
fetch: fetch.clone(), fetch: fetch.clone(),
signer: signer_service.clone(), signer: signer_service.clone(),
ui_address: cmd.ui_conf.redirection_address(), ui_address: cmd.ui_conf.redirection_address(),
@ -721,7 +720,6 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
sync_status, sync_status,
node_health, node_health,
contract_client: contract_client, contract_client: contract_client,
remote: event_loop.raw_remote(),
fetch: fetch.clone(), fetch: fetch.clone(),
signer: signer_service.clone(), signer: signer_service.clone(),
ui_address: cmd.ui_conf.redirection_address(), ui_address: cmd.ui_conf.redirection_address(),

View File

@ -92,7 +92,7 @@ impl<F: Fetch> Client<F> {
/// Gets the current ETH price and calls `set_price` with the result. /// Gets the current ETH price and calls `set_price` with the result.
pub fn get<G: Fn(PriceInfo) + Sync + Send + 'static>(&self, set_price: G) { pub fn get<G: Fn(PriceInfo) + Sync + Send + 'static>(&self, set_price: G) {
self.fetch.forget(self.fetch.fetch(&self.api_endpoint) self.fetch.process_and_forget(self.fetch.fetch(&self.api_endpoint)
.map_err(|err| Error::Fetch(err)) .map_err(|err| Error::Fetch(err))
.and_then(move |mut response| { .and_then(move |mut response| {
if !response.is_success() { if !response.is_success() {
@ -156,10 +156,11 @@ mod test {
} }
// this guarantees that the calls to price_info::Client::get will block for execution // this guarantees that the calls to price_info::Client::get will block for execution
fn forget<F, I, E>(&self, f: F) where fn process_and_forget<F, I, E>(&self, f: F) where
F: Future<Item=I, Error=E> + Send + 'static, F: Future<Item=I, Error=E> + Send + 'static,
I: Send + 'static, I: Send + 'static,
E: Send + 'static { E: Send + 'static,
{
let _ = f.wait(); let _ = f.wait();
} }
} }

View File

@ -10,7 +10,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
ansi_term = "0.9" ansi_term = "0.9"
cid = "0.2" cid = "0.2"
futures = "0.1"
futures-cpupool = "0.1" futures-cpupool = "0.1"
log = "0.3" log = "0.3"
multihash ="0.6" multihash ="0.6"
@ -28,13 +27,12 @@ tokio-timer = "0.1"
transient-hashmap = "0.4" transient-hashmap = "0.4"
itertools = "0.5" itertools = "0.5"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-minihttp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-ipc-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.8" }
jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
ethcore-io = { path = "../util/io" } ethcore-io = { path = "../util/io" }
ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc = { path = "../ipc/rpc" }

View File

@ -19,7 +19,6 @@
use jsonrpc_core; use jsonrpc_core;
use http; use http;
use hyper; use hyper;
use minihttp;
/// HTTP RPC server impl-independent metadata extractor /// HTTP RPC server impl-independent metadata extractor
pub trait HttpMetaExtractor: Send + Sync + 'static { pub trait HttpMetaExtractor: Send + Sync + 'static {
@ -29,24 +28,22 @@ pub trait HttpMetaExtractor: Send + Sync + 'static {
fn read_metadata(&self, origin: Option<String>, user_agent: Option<String>, dapps_origin: Option<String>) -> Self::Metadata; fn read_metadata(&self, origin: Option<String>, user_agent: Option<String>, dapps_origin: Option<String>) -> Self::Metadata;
} }
pub struct HyperMetaExtractor<T> { pub struct MetaExtractor<T> {
extractor: T, extractor: T,
} }
impl<T> HyperMetaExtractor<T> { impl<T> MetaExtractor<T> {
pub fn new(extractor: T) -> Self { pub fn new(extractor: T) -> Self {
HyperMetaExtractor { MetaExtractor { extractor }
extractor: extractor,
}
} }
} }
impl<M, T> http::MetaExtractor<M> for HyperMetaExtractor<T> where impl<M, T> http::MetaExtractor<M> for MetaExtractor<T> where
T: HttpMetaExtractor<Metadata = M>, T: HttpMetaExtractor<Metadata = M>,
M: jsonrpc_core::Metadata, M: jsonrpc_core::Metadata,
{ {
fn read_metadata(&self, req: &hyper::server::Request<hyper::net::HttpStream>) -> M { fn read_metadata(&self, req: &hyper::server::Request) -> M {
let as_string = |header: Option<&http::request_response::header::Raw>| header let as_string = |header: Option<&hyper::header::Raw>| header
.and_then(|raw| raw.one()) .and_then(|raw| raw.one())
.map(|raw| String::from_utf8_lossy(raw).into_owned()); .map(|raw| String::from_utf8_lossy(raw).into_owned());
@ -56,28 +53,3 @@ impl<M, T> http::MetaExtractor<M> for HyperMetaExtractor<T> where
self.extractor.read_metadata(origin, user_agent, dapps_origin) self.extractor.read_metadata(origin, user_agent, dapps_origin)
} }
} }
pub struct MiniMetaExtractor<T> {
extractor: T,
}
impl<T> MiniMetaExtractor<T> {
pub fn new(extractor: T) -> Self {
MiniMetaExtractor {
extractor: extractor,
}
}
}
impl<M, T> minihttp::MetaExtractor<M> for MiniMetaExtractor<T> where
T: HttpMetaExtractor<Metadata = M>,
M: jsonrpc_core::Metadata,
{
fn read_metadata(&self, req: &minihttp::Req) -> M {
let origin = req.header("origin").map(|h| h.to_owned());
let user_agent = req.header("user-agent").map(|h| h.to_owned());
let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned());
self.extractor.read_metadata(origin, user_agent, dapps_origin)
}
}

View File

@ -23,7 +23,6 @@
extern crate ansi_term; extern crate ansi_term;
extern crate cid; extern crate cid;
extern crate crypto as rust_crypto; extern crate crypto as rust_crypto;
extern crate futures;
extern crate futures_cpupool; extern crate futures_cpupool;
extern crate itertools; extern crate itertools;
extern crate multihash; extern crate multihash;
@ -41,7 +40,6 @@ extern crate transient_hashmap;
extern crate jsonrpc_core; extern crate jsonrpc_core;
extern crate jsonrpc_http_server as http; extern crate jsonrpc_http_server as http;
extern crate jsonrpc_ipc_server as ipc; extern crate jsonrpc_ipc_server as ipc;
extern crate jsonrpc_minihttp_server as minihttp;
extern crate jsonrpc_pubsub; extern crate jsonrpc_pubsub;
extern crate ethash; extern crate ethash;
@ -109,22 +107,8 @@ use std::net::SocketAddr;
use http::tokio_core; use http::tokio_core;
/// RPC HTTP Server instance /// RPC HTTP Server instance
pub enum HttpServer { pub type HttpServer = http::Server;
/// Fast MiniHTTP variant
Mini(minihttp::Server),
/// Hyper variant
Hyper(http::Server),
}
impl HttpServer {
/// Returns current listening address.
pub fn address(&self) -> &SocketAddr {
match *self {
HttpServer::Mini(ref s) => s.address(),
HttpServer::Hyper(ref s) => &s.addrs()[0],
}
}
}
/// RPC HTTP Server error /// RPC HTTP Server error
#[derive(Debug)] #[derive(Debug)]
@ -145,23 +129,6 @@ impl From<http::Error> for HttpServerError {
} }
} }
impl From<minihttp::Error> for HttpServerError {
fn from(e: minihttp::Error) -> Self {
use self::HttpServerError::*;
match e {
minihttp::Error::Io(io) => Io(io),
}
}
}
/// HTTP server implementation-specific settings.
pub enum HttpSettings<R: RequestMiddleware> {
/// Enable fast minihttp server with given number of threads.
Threads(usize),
/// Enable standard server with optional dapps middleware.
Dapps(Option<R>),
}
/// Start http server asynchronously and returns result with `Server` handle on success or an error. /// Start http server asynchronously and returns result with `Server` handle on success or an error.
pub fn start_http<M, S, H, T, R>( pub fn start_http<M, S, H, T, R>(
addr: &SocketAddr, addr: &SocketAddr,
@ -170,7 +137,8 @@ pub fn start_http<M, S, H, T, R>(
handler: H, handler: H,
remote: tokio_core::reactor::Remote, remote: tokio_core::reactor::Remote,
extractor: T, extractor: T,
settings: HttpSettings<R>, middleware: Option<R>,
threads: usize,
) -> Result<HttpServer, HttpServerError> where ) -> Result<HttpServer, HttpServerError> where
M: jsonrpc_core::Metadata, M: jsonrpc_core::Metadata,
S: jsonrpc_core::Middleware<M>, S: jsonrpc_core::Middleware<M>,
@ -178,30 +146,18 @@ pub fn start_http<M, S, H, T, R>(
T: HttpMetaExtractor<Metadata=M>, T: HttpMetaExtractor<Metadata=M>,
R: RequestMiddleware, R: RequestMiddleware,
{ {
Ok(match settings { let mut builder = http::ServerBuilder::new(handler)
HttpSettings::Dapps(middleware) => { .threads(threads)
let mut builder = http::ServerBuilder::new(handler) .event_loop_remote(remote)
.event_loop_remote(remote) .meta_extractor(http_common::MetaExtractor::new(extractor))
.meta_extractor(http_common::HyperMetaExtractor::new(extractor)) .cors(cors_domains.into())
.cors(cors_domains.into()) .allowed_hosts(allowed_hosts.into());
.allowed_hosts(allowed_hosts.into());
if let Some(dapps) = middleware { if let Some(dapps) = middleware {
builder = builder.request_middleware(dapps) builder = builder.request_middleware(dapps)
} }
builder.start_http(addr)
.map(HttpServer::Hyper)? Ok(builder.start_http(addr)?)
},
HttpSettings::Threads(threads) => {
minihttp::ServerBuilder::new(handler)
.threads(threads)
.meta_extractor(http_common::MiniMetaExtractor::new(extractor))
.cors(cors_domains.into())
.allowed_hosts(allowed_hosts.into())
.start_http(addr)
.map(HttpServer::Mini)?
},
})
} }
/// Start ipc server asynchronously and returns result with `Server` handle on success or an error. /// Start ipc server asynchronously and returns result with `Server` handle on success or an error.

View File

@ -18,7 +18,7 @@ use devtools::http_client;
use jsonrpc_core::MetaIoHandler; use jsonrpc_core::MetaIoHandler;
use http::{self, hyper}; use http::{self, hyper};
use {HttpSettings, HttpServer}; use {HttpServer};
use tests::helpers::Server; use tests::helpers::Server;
use v1::{extractors, Metadata}; use v1::{extractors, Metadata};
@ -33,11 +33,13 @@ fn serve(handler: Option<MetaIoHandler<Metadata>>) -> Server<HttpServer> {
handler, handler,
remote, remote,
extractors::RpcExtractor, extractors::RpcExtractor,
HttpSettings::Dapps(Some(|_req: &hyper::server::Request<hyper::net::HttpStream>, _control: &hyper::Control| { Some(|request: hyper::Request| {
http::RequestMiddlewareAction::Proceed { http::RequestMiddlewareAction::Proceed {
should_continue_on_invalid_cors: false should_continue_on_invalid_cors: false,
request,
} }
})), }),
1,
).unwrap()) ).unwrap())
} }
@ -49,14 +51,13 @@ fn request(server: Server<HttpServer>, request: &str) -> http_client::Response {
#[cfg(test)] #[cfg(test)]
mod testsing { mod testsing {
use jsonrpc_core::{MetaIoHandler, Value}; use jsonrpc_core::{MetaIoHandler, Value};
use jsonrpc_core::futures::{Future, future};
use v1::Metadata; use v1::Metadata;
use super::{request, Server}; use super::{request, Server};
fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) { fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) {
let mut io = MetaIoHandler::default(); let mut io = MetaIoHandler::default();
io.add_method_with_meta("hello", |_, meta: Metadata| { io.add_method_with_meta("hello", |_, meta: Metadata| {
future::ok(Value::String(format!("{}", meta.origin))).boxed() Ok(Value::String(format!("{}", meta.origin)))
}); });
let server = super::serve(Some(io)); let server = super::serve(Some(io));
let address = server.server.address().to_owned(); let address = server.server.address().to_owned();

View File

@ -236,7 +236,7 @@ impl<M: core::Middleware<Metadata>> core::Middleware<Metadata> for WsDispatcher<
if use_full { if use_full {
A(self.full_handler.handle_rpc_request(request, meta)) A(self.full_handler.handle_rpc_request(request, meta))
} else { } else {
B(process(request, meta).boxed()) B(Box::new(process(request, meta)))
} }
} }
} }

View File

@ -20,7 +20,6 @@ use std::fmt::Debug;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
use futures::{future, Future, BoxFuture};
use light::cache::Cache as LightDataCache; use light::cache::Cache as LightDataCache;
use light::client::LightChainClient; use light::client::LightChainClient;
use light::on_demand::{request, OnDemand}; use light::on_demand::{request, OnDemand};
@ -43,7 +42,9 @@ use ethcore::transaction::{Action, SignedTransaction, PendingTransaction, Transa
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use crypto::DEFAULT_MAC; use crypto::DEFAULT_MAC;
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::future::Either;
use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload}; use v1::helpers::{errors, TransactionRequest, FilledTransactionRequest, ConfirmationPayload};
use v1::types::{ use v1::types::{
H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes, H256 as RpcH256, H520 as RpcH520, Bytes as RpcBytes,
@ -120,7 +121,7 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
false => request.nonce, false => request.nonce,
true => Some(Self::fill_nonce(request.nonce, &from, &miner, &client)), true => Some(Self::fill_nonce(request.nonce, &from, &miner, &client)),
}; };
future::ok(FilledTransactionRequest { Box::new(future::ok(FilledTransactionRequest {
from: from, from: from,
used_default_from: request.from.is_none(), used_default_from: request.from.is_none(),
to: request.to, to: request.to,
@ -130,7 +131,7 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
value: request.value.unwrap_or_else(|| 0.into()), value: request.value.unwrap_or_else(|| 0.into()),
data: request.data.unwrap_or_else(Vec::new), data: request.data.unwrap_or_else(Vec::new),
condition: request.condition, condition: request.condition,
}).boxed() }))
} }
fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith) fn sign(&self, accounts: Arc<AccountProvider>, filled: FilledTransactionRequest, password: SignWith)
@ -139,7 +140,7 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
let (client, miner) = (self.client.clone(), self.miner.clone()); let (client, miner) = (self.client.clone(), self.miner.clone());
let chain_id = client.signing_chain_id(); let chain_id = client.signing_chain_id();
let address = filled.from; let address = filled.from;
future::done({ Box::new(future::done({
let t = Transaction { let t = Transaction {
nonce: Self::fill_nonce(filled.nonce, &filled.from, &miner, &client), nonce: Self::fill_nonce(filled.nonce, &filled.from, &miner, &client),
action: filled.to.map_or(Action::Create, Action::Call), action: filled.to.map_or(Action::Create, Action::Call),
@ -159,7 +160,7 @@ impl<C: MiningBlockChainClient, M: MinerService> Dispatcher for FullDispatcher<C
.expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed") .expect("Transaction was signed by AccountsProvider; it never produces invalid signatures; qed")
})) }))
} }
}).boxed() }))
} }
fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256, Error> { fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256, Error> {
@ -182,7 +183,7 @@ pub fn fetch_gas_price_corpus(
const GAS_PRICE_SAMPLE_SIZE: usize = 100; const GAS_PRICE_SAMPLE_SIZE: usize = 100;
if let Some(cached) = { cache.lock().gas_price_corpus() } { if let Some(cached) = { cache.lock().gas_price_corpus() } {
return future::ok(cached).boxed() return Box::new(future::ok(cached))
} }
let cache = cache.clone(); let cache = cache.clone();
@ -217,8 +218,8 @@ pub fn fetch_gas_price_corpus(
}); });
match eventual_corpus { match eventual_corpus {
Some(corp) => corp.map_err(|_| errors::no_light_peers()).boxed(), Some(corp) => Box::new(corp.map_err(|_| errors::no_light_peers())),
None => future::err(errors::network_disabled()).boxed(), None => Box::new(future::err(errors::network_disabled())),
} }
} }
@ -284,7 +285,7 @@ impl LightDispatcher {
// fast path where we don't go to network; nonce provided or can be gotten from queue. // fast path where we don't go to network; nonce provided or can be gotten from queue.
let maybe_nonce = self.transaction_queue.read().next_nonce(&addr); let maybe_nonce = self.transaction_queue.read().next_nonce(&addr);
if let Some(nonce) = maybe_nonce { if let Some(nonce) = maybe_nonce {
return future::ok(nonce).boxed() return Box::new(future::ok(nonce))
} }
let best_header = self.client.best_block_header(); let best_header = self.client.best_block_header();
@ -295,11 +296,11 @@ impl LightDispatcher {
}).expect("no back-references; therefore all back-references valid; qed")); }).expect("no back-references; therefore all back-references valid; qed"));
match nonce_future { match nonce_future {
Some(x) => Some(x) => Box::new(
x.map(move |acc| acc.map_or(account_start_nonce, |acc| acc.nonce)) x.map(move |acc| acc.map_or(account_start_nonce, |acc| acc.nonce))
.map_err(|_| errors::no_light_peers()) .map_err(|_| errors::no_light_peers())
.boxed(), ),
None => future::err(errors::network_disabled()).boxed() None => Box::new(future::err(errors::network_disabled()))
} }
} }
} }
@ -332,29 +333,29 @@ impl Dispatcher for LightDispatcher {
// fast path for known gas price. // fast path for known gas price.
let gas_price = match request_gas_price { let gas_price = match request_gas_price {
Some(gas_price) => future::ok(with_gas_price(gas_price)).boxed(), Some(gas_price) => Either::A(future::ok(with_gas_price(gas_price))),
None => fetch_gas_price_corpus( None => Either::B(fetch_gas_price_corpus(
self.sync.clone(), self.sync.clone(),
self.client.clone(), self.client.clone(),
self.on_demand.clone(), self.on_demand.clone(),
self.cache.clone() self.cache.clone()
).and_then(|corp| match corp.median() { ).and_then(|corp| match corp.median() {
Some(median) => future::ok(*median), Some(median) => Ok(*median),
None => future::ok(DEFAULT_GAS_PRICE), // fall back to default on error. None => Ok(DEFAULT_GAS_PRICE), // fall back to default on error.
}).map(with_gas_price).boxed() }).map(with_gas_price))
}; };
match (request_nonce, force_nonce) { match (request_nonce, force_nonce) {
(_, false) | (Some(_), true) => gas_price, (_, false) | (Some(_), true) => Box::new(gas_price),
(None, true) => { (None, true) => {
let next_nonce = self.next_nonce(from); let next_nonce = self.next_nonce(from);
gas_price.and_then(move |mut filled| next_nonce Box::new(gas_price.and_then(move |mut filled| next_nonce
.map_err(|_| errors::no_light_peers()) .map_err(|_| errors::no_light_peers())
.map(move |nonce| { .map(move |nonce| {
filled.nonce = Some(nonce); filled.nonce = Some(nonce);
filled filled
}) })
).boxed() ))
}, },
} }
} }
@ -390,13 +391,12 @@ impl Dispatcher for LightDispatcher {
// fast path for pre-filled nonce. // fast path for pre-filled nonce.
if let Some(nonce) = filled.nonce { if let Some(nonce) = filled.nonce {
return future::done(with_nonce(filled, nonce)).boxed() return Box::new(future::done(with_nonce(filled, nonce)))
} }
self.next_nonce(address) Box::new(self.next_nonce(address)
.map_err(|_| errors::no_light_peers()) .map_err(|_| errors::no_light_peers())
.and_then(move |nonce| with_nonce(filled, nonce)) .and_then(move |nonce| with_nonce(filled, nonce)))
.boxed()
} }
fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256, Error> { fn dispatch_transaction(&self, signed_transaction: PendingTransaction) -> Result<H256, Error> {
@ -497,7 +497,7 @@ pub fn execute<D: Dispatcher + 'static>(
match payload { match payload {
ConfirmationPayload::SendTransaction(request) => { ConfirmationPayload::SendTransaction(request) => {
let condition = request.condition.clone().map(Into::into); let condition = request.condition.clone().map(Into::into);
dispatcher.sign(accounts, request, pass) Box::new(dispatcher.sign(accounts, request, pass)
.map(move |v| v.map(move |tx| PendingTransaction::new(tx, condition))) .map(move |v| v.map(move |tx| PendingTransaction::new(tx, condition)))
.map(WithToken::into_tuple) .map(WithToken::into_tuple)
.map(|(tx, token)| (tx, token, dispatcher)) .map(|(tx, token)| (tx, token, dispatcher))
@ -506,18 +506,18 @@ pub fn execute<D: Dispatcher + 'static>(
.map(RpcH256::from) .map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction) .map(ConfirmationResponse::SendTransaction)
.map(move |h| WithToken::from((h, tok))) .map(move |h| WithToken::from((h, tok)))
}).boxed() }))
}, },
ConfirmationPayload::SignTransaction(request) => { ConfirmationPayload::SignTransaction(request) => {
dispatcher.sign(accounts, request, pass) Box::new(dispatcher.sign(accounts, request, pass)
.map(|result| result .map(|result| result
.map(RpcRichRawTransaction::from) .map(RpcRichRawTransaction::from)
.map(ConfirmationResponse::SignTransaction) .map(ConfirmationResponse::SignTransaction)
).boxed() ))
}, },
ConfirmationPayload::EthSignMessage(address, data) => { ConfirmationPayload::EthSignMessage(address, data) => {
if accounts.is_hardware_address(address) { if accounts.is_hardware_address(address) {
return future::err(errors::unsupported("Signing via hardware wallets is not supported.", None)).boxed(); return Box::new(future::err(errors::unsupported("Signing via hardware wallets is not supported.", None)));
} }
let hash = eth_data_hash(data); let hash = eth_data_hash(data);
@ -527,11 +527,11 @@ pub fn execute<D: Dispatcher + 'static>(
.map(RpcH520::from) .map(RpcH520::from)
.map(ConfirmationResponse::Signature) .map(ConfirmationResponse::Signature)
); );
future::done(res).boxed() Box::new(future::done(res))
}, },
ConfirmationPayload::Decrypt(address, data) => { ConfirmationPayload::Decrypt(address, data) => {
if accounts.is_hardware_address(address) { if accounts.is_hardware_address(address) {
return future::err(errors::unsupported("Decrypting via hardware wallets is not supported.", None)).boxed(); return Box::new(future::err(errors::unsupported("Decrypting via hardware wallets is not supported.", None)));
} }
let res = decrypt(&accounts, address, data, pass) let res = decrypt(&accounts, address, data, pass)
@ -539,7 +539,7 @@ pub fn execute<D: Dispatcher + 'static>(
.map(RpcBytes) .map(RpcBytes)
.map(ConfirmationResponse::Decrypt) .map(ConfirmationResponse::Decrypt)
); );
future::done(res).boxed() Box::new(future::done(res))
}, },
} }
} }
@ -602,20 +602,18 @@ pub fn from_rpc<D>(payload: RpcConfirmationPayload, default_account: Address, di
{ {
match payload { match payload {
RpcConfirmationPayload::SendTransaction(request) => { RpcConfirmationPayload::SendTransaction(request) => {
dispatcher.fill_optional_fields(request.into(), default_account, false) Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false)
.map(ConfirmationPayload::SendTransaction) .map(ConfirmationPayload::SendTransaction))
.boxed()
}, },
RpcConfirmationPayload::SignTransaction(request) => { RpcConfirmationPayload::SignTransaction(request) => {
dispatcher.fill_optional_fields(request.into(), default_account, false) Box::new(dispatcher.fill_optional_fields(request.into(), default_account, false)
.map(ConfirmationPayload::SignTransaction) .map(ConfirmationPayload::SignTransaction))
.boxed()
}, },
RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => { RpcConfirmationPayload::Decrypt(RpcDecryptRequest { address, msg }) => {
future::ok(ConfirmationPayload::Decrypt(address.into(), msg.into())).boxed() Box::new(future::ok(ConfirmationPayload::Decrypt(address.into(), msg.into())))
}, },
RpcConfirmationPayload::EthSignMessage(RpcSignRequest { address, data }) => { RpcConfirmationPayload::EthSignMessage(RpcSignRequest { address, data }) => {
future::ok(ConfirmationPayload::EthSignMessage(address.into(), data.into())).boxed() Box::new(future::ok(ConfirmationPayload::EthSignMessage(address.into(), data.into())))
}, },
} }
} }

View File

@ -20,7 +20,7 @@ use std::fmt;
use rlp::DecoderError; use rlp::DecoderError;
use ethcore::error::{Error as EthcoreError, CallError, TransactionError}; use ethcore::error::{Error as EthcoreError, CallError, TransactionError};
use ethcore::account_provider::{SignError as AccountError}; use ethcore::account_provider::{SignError as AccountError};
use jsonrpc_core::{Error, ErrorCode, Value}; use jsonrpc_core::{futures, Error, ErrorCode, Value};
mod codes { mod codes {
// NOTE [ToDr] Codes from [-32099, -32000] // NOTE [ToDr] Codes from [-32099, -32000]
@ -379,6 +379,6 @@ pub fn deprecated<T: Into<Option<String>>>(message: T) -> Error {
} }
// on-demand sender cancelled. // on-demand sender cancelled.
pub fn on_demand_cancel(_cancel: ::futures::sync::oneshot::Canceled) -> Error { pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error {
internal("on-demand sender cancelled", "") internal("on-demand sender cancelled", "")
} }

View File

@ -25,9 +25,9 @@ use ethcore::ids::BlockId;
use ethcore::filter::Filter as EthcoreFilter; use ethcore::filter::Filter as EthcoreFilter;
use ethcore::transaction::{Action, Transaction as EthTransaction}; use ethcore::transaction::{Action, Transaction as EthTransaction};
use futures::{future, Future, BoxFuture}; use jsonrpc_core::{BoxFuture, Error};
use futures::future::Either; use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::Error; use jsonrpc_core::futures::future::Either;
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use light::cache::Cache; use light::cache::Cache;
@ -113,22 +113,21 @@ impl LightFetch {
let mut reqs = Vec::new(); let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) { let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r, Ok(r) => r,
Err(e) => return future::err(e).boxed(), Err(e) => return Box::new(future::err(e)),
}; };
let maybe_future = self.sync.with_context(move |ctx| { let maybe_future = self.sync.with_context(move |ctx| {
self.on_demand.request_raw(ctx, reqs) Box::new(self.on_demand.request_raw(ctx, reqs)
.expect("all back-references known to be valid; qed") .expect("all back-references known to be valid; qed")
.map(|res| extract_header(&res, header_ref) .map(|res| extract_header(&res, header_ref)
.expect("these responses correspond to requests that header_ref belongs to. \ .expect("these responses correspond to requests that header_ref belongs to. \
therefore it will not fail; qed")) therefore it will not fail; qed"))
.map_err(errors::on_demand_cancel) .map_err(errors::on_demand_cancel))
.boxed()
}); });
match maybe_future { match maybe_future {
Some(recv) => recv, Some(recv) => recv,
None => future::err(errors::network_disabled()).boxed() None => Box::new(future::err(errors::network_disabled()))
} }
} }
@ -138,25 +137,24 @@ impl LightFetch {
let mut reqs = Vec::new(); let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) { let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r, Ok(r) => r,
Err(e) => return future::err(e).boxed(), Err(e) => return Box::new(future::err(e)),
}; };
reqs.push(request::Account { header: header_ref, address: address }.into()); reqs.push(request::Account { header: header_ref, address: address }.into());
let maybe_future = self.sync.with_context(move |ctx| { let maybe_future = self.sync.with_context(move |ctx| {
self.on_demand.request_raw(ctx, reqs) Box::new(self.on_demand.request_raw(ctx, reqs)
.expect("all back-references known to be valid; qed") .expect("all back-references known to be valid; qed")
.map(|mut res| match res.pop() { .map(|mut res| match res.pop() {
Some(OnDemandResponse::Account(acc)) => acc, Some(OnDemandResponse::Account(acc)) => acc,
_ => panic!("responses correspond directly with requests in amount and type; qed"), _ => panic!("responses correspond directly with requests in amount and type; qed"),
}) })
.map_err(errors::on_demand_cancel) .map_err(errors::on_demand_cancel))
.boxed()
}); });
match maybe_future { match maybe_future {
Some(recv) => recv, Some(recv) => recv,
None => future::err(errors::network_disabled()).boxed() None => Box::new(future::err(errors::network_disabled()))
} }
} }
@ -193,7 +191,7 @@ impl LightFetch {
let header_fut = self.header(id); let header_fut = self.header(id);
// fetch missing transaction fields from the network. // fetch missing transaction fields from the network.
nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { Box::new(nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| {
let action = req.to.map_or(Action::Create, Action::Call); let action = req.to.map_or(Action::Create, Action::Call);
let value = req.value.unwrap_or_else(U256::zero); let value = req.value.unwrap_or_else(U256::zero);
let data = req.data.unwrap_or_default(); let data = req.data.unwrap_or_default();
@ -222,10 +220,10 @@ impl LightFetch {
// TODO: get last-hashes from network. // TODO: get last-hashes from network.
let env_info = match client.env_info(id) { let env_info = match client.env_info(id) {
Some(env_info) => env_info, Some(env_info) => env_info,
_ => return future::err(errors::unknown_block()).boxed(), _ => return Either::A(future::err(errors::unknown_block())),
}; };
execute_tx(gas_known, ExecuteParams { Either::B(execute_tx(gas_known, ExecuteParams {
from: from, from: from,
tx: tx, tx: tx,
hdr: hdr, hdr: hdr,
@ -233,8 +231,8 @@ impl LightFetch {
engine: client.engine().clone(), engine: client.engine().clone(),
on_demand: on_demand, on_demand: on_demand,
sync: sync, sync: sync,
}) }))
}).boxed() }))
} }
/// get a block itself. fails on unknown block ID. /// get a block itself. fails on unknown block ID.
@ -242,33 +240,31 @@ impl LightFetch {
let mut reqs = Vec::new(); let mut reqs = Vec::new();
let header_ref = match self.make_header_requests(id, &mut reqs) { let header_ref = match self.make_header_requests(id, &mut reqs) {
Ok(r) => r, Ok(r) => r,
Err(e) => return future::err(e).boxed(), Err(e) => return Box::new(future::err(e)),
}; };
reqs.push(request::Body(header_ref).into()); reqs.push(request::Body(header_ref).into());
let maybe_future = self.sync.with_context(move |ctx| { let maybe_future = self.sync.with_context(move |ctx| {
self.on_demand.request_raw(ctx, reqs) Box::new(self.on_demand.request_raw(ctx, reqs)
.expect("all back-references known to be valid; qed") .expect("all back-references known to be valid; qed")
.map(|mut res| match res.pop() { .map(|mut res| match res.pop() {
Some(OnDemandResponse::Body(b)) => b, Some(OnDemandResponse::Body(b)) => b,
_ => panic!("responses correspond directly with requests in amount and type; qed"), _ => panic!("responses correspond directly with requests in amount and type; qed"),
}) })
.map_err(errors::on_demand_cancel) .map_err(errors::on_demand_cancel))
.boxed()
}); });
match maybe_future { match maybe_future {
Some(recv) => recv, Some(recv) => recv,
None => future::err(errors::network_disabled()).boxed() None => Box::new(future::err(errors::network_disabled()))
} }
} }
/// get transaction logs /// get transaction logs
pub fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>, Error> { pub fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>, Error> {
use std::collections::BTreeMap; use std::collections::BTreeMap;
use jsonrpc_core::futures::stream::{self, Stream};
use futures::stream::{self, Stream};
const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed";
@ -282,9 +278,9 @@ impl LightFetch {
}; };
match (block_number(filter.to_block), block_number(filter.from_block)) { match (block_number(filter.to_block), block_number(filter.from_block)) {
(Some(to), Some(from)) if to < from => return future::ok(Vec::new()).boxed(), (Some(to), Some(from)) if to < from => return Box::new(future::ok(Vec::new())),
(Some(_), Some(_)) => {}, (Some(_), Some(_)) => {},
_ => return future::err(errors::unknown_block()).boxed(), _ => return Box::new(future::err(errors::unknown_block())),
} }
let maybe_future = self.sync.with_context(move |ctx| { let maybe_future = self.sync.with_context(move |ctx| {
@ -318,8 +314,8 @@ impl LightFetch {
}); });
match maybe_future { match maybe_future {
Some(fut) => fut.boxed(), Some(fut) => Box::new(fut),
None => future::err(errors::network_disabled()).boxed(), None => Box::new(future::err(errors::network_disabled())),
} }
} }
} }
@ -339,7 +335,7 @@ struct ExecuteParams {
// this will double the gas on each `OutOfGas` error. // this will double the gas on each `OutOfGas` error.
fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResult, Error> { fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResult, Error> {
if !gas_known { if !gas_known {
future::loop_fn(params, |mut params| { Box::new(future::loop_fn(params, |mut params| {
execute_tx(true, params.clone()).and_then(move |res| { execute_tx(true, params.clone()).and_then(move |res| {
match res { match res {
Ok(executed) => { Ok(executed) => {
@ -360,7 +356,7 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResu
failed => Ok(future::Loop::Break(failed)), failed => Ok(future::Loop::Break(failed)),
} }
}) })
}).boxed() }))
} else { } else {
trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand", trace!(target: "light_fetch", "Placing execution request for {} gas in on_demand",
params.tx.gas); params.tx.gas);
@ -381,8 +377,8 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResu
}); });
match proved_future { match proved_future {
Some(fut) => fut.boxed(), Some(fut) => Box::new(fut),
None => future::err(errors::network_disabled()).boxed(), None => Box::new(future::err(errors::network_disabled())),
} }
} }
} }

View File

@ -15,8 +15,8 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use jsonrpc_core::Error; use jsonrpc_core::Error;
use futures::{self, Future}; use jsonrpc_core::futures::{self, Future};
use futures::sync::oneshot; use jsonrpc_core::futures::sync::oneshot;
use v1::helpers::errors; use v1::helpers::errors;
pub type Res<T> = Result<T, Error>; pub type Res<T> = Result<T, Error>;

View File

@ -22,8 +22,8 @@ use parking_lot::Mutex;
use jsonrpc_core::futures::future::{self, Either}; use jsonrpc_core::futures::future::{self, Either};
use jsonrpc_core::futures::sync::mpsc; use jsonrpc_core::futures::sync::mpsc;
use jsonrpc_core::futures::{Sink, Future, BoxFuture}; use jsonrpc_core::futures::{Sink, Future};
use jsonrpc_core::{self as core, MetaIoHandler}; use jsonrpc_core::{self as core, MetaIoHandler, BoxFuture};
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
use v1::helpers::Subscribers; use v1::helpers::Subscribers;
@ -130,7 +130,7 @@ impl<S: core::Middleware<Metadata>> GenericPollManager<S> {
} }
// return a future represeting all the polls // return a future represeting all the polls
future::join_all(futures).map(|_| ()).boxed() Box::new(future::join_all(futures).map(|_| ()))
} }
} }

View File

@ -20,7 +20,6 @@ use std::thread;
use std::time::{Instant, Duration}; use std::time::{Instant, Duration};
use std::sync::Arc; use std::sync::Arc;
use futures::{self, future, BoxFuture, Future};
use rlp::{self, UntrustedRlp}; use rlp::{self, UntrustedRlp};
use time::get_time; use time::get_time;
use bigint::prelude::U256; use bigint::prelude::U256;
@ -41,7 +40,8 @@ use ethcore::transaction::SignedTransaction;
use ethcore::snapshot::SnapshotService; use ethcore::snapshot::SnapshotService;
use ethsync::{SyncProvider}; use ethsync::{SyncProvider};
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::future;
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use v1::helpers::{errors, limit_logs, fake_sign}; use v1::helpers::{errors, limit_logs, fake_sign};
@ -318,19 +318,15 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
} }
} }
fn author(&self, meta: Metadata) -> BoxFuture<RpcH160, Error> { fn author(&self, meta: Metadata) -> Result<RpcH160, Error> {
let dapp = meta.dapp_id(); let dapp = meta.dapp_id();
let author = move || { let mut miner = self.miner.author();
let mut miner = self.miner.author(); if miner == 0.into() {
if miner == 0.into() { miner = self.dapp_accounts(dapp.into())?.get(0).cloned().unwrap_or_default();
miner = self.dapp_accounts(dapp.into())?.get(0).cloned().unwrap_or_default(); }
}
Ok(RpcH160::from(miner)) Ok(RpcH160::from(miner))
};
futures::done(author()).boxed()
} }
fn is_mining(&self) -> Result<bool, Error> { fn is_mining(&self) -> Result<bool, Error> {
@ -345,15 +341,11 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
Ok(RpcU256::from(default_gas_price(&*self.client, &*self.miner))) Ok(RpcU256::from(default_gas_price(&*self.client, &*self.miner)))
} }
fn accounts(&self, meta: Metadata) -> BoxFuture<Vec<RpcH160>, Error> { fn accounts(&self, meta: Metadata) -> Result<Vec<RpcH160>, Error> {
let dapp = meta.dapp_id(); let dapp = meta.dapp_id();
let accounts = move || { let accounts = self.dapp_accounts(dapp.into())?;
let accounts = self.dapp_accounts(dapp.into())?; Ok(accounts.into_iter().map(Into::into).collect())
Ok(accounts.into_iter().map(Into::into).collect())
};
futures::done(accounts()).boxed()
} }
fn block_number(&self) -> Result<RpcU256, Error> { fn block_number(&self) -> Result<RpcU256, Error> {
@ -371,7 +363,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
None => Err(errors::state_pruned()), None => Err(errors::state_pruned()),
}; };
future::done(res).boxed() Box::new(future::done(res))
} }
fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing<BlockNumber>) -> BoxFuture<RpcH256, Error> { fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing<BlockNumber>) -> BoxFuture<RpcH256, Error> {
@ -386,7 +378,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
None => Err(errors::state_pruned()), None => Err(errors::state_pruned()),
}; };
future::done(res).boxed() Box::new(future::done(res))
} }
fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> { fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
@ -411,38 +403,37 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
} }
}; };
future::done(res).boxed() Box::new(future::done(res))
} }
fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> { fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> {
future::ok(self.client.block(BlockId::Hash(hash.into())) Box::new(future::ok(self.client.block(BlockId::Hash(hash.into()))
.map(|block| block.transactions_count().into())).boxed() .map(|block| block.transactions_count().into())))
} }
fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> { fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> {
future::ok(match num { Box::new(future::ok(match num {
BlockNumber::Pending => Some( BlockNumber::Pending => Some(
self.miner.status().transactions_in_pending_block.into() self.miner.status().transactions_in_pending_block.into()
), ),
_ => _ =>
self.client.block(num.into()) self.client.block(num.into())
.map(|block| block.transactions_count().into()) .map(|block| block.transactions_count().into())
}).boxed() }))
} }
fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> { fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> {
future::ok(self.client.block(BlockId::Hash(hash.into())) Box::new(future::ok(self.client.block(BlockId::Hash(hash.into()))
.map(|block| block.uncles_count().into())) .map(|block| block.uncles_count().into())))
.boxed()
} }
fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> { fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> {
future::ok(match num { Box::new(future::ok(match num {
BlockNumber::Pending => Some(0.into()), BlockNumber::Pending => Some(0.into()),
_ => self.client.block(num.into()) _ => self.client.block(num.into())
.map(|block| block.uncles_count().into() .map(|block| block.uncles_count().into()
), ),
}).boxed() }))
} }
fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> { fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
@ -456,15 +447,15 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
None => Err(errors::state_pruned()), None => Err(errors::state_pruned()),
}; };
future::done(res).boxed() Box::new(future::done(res))
} }
fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> { fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
future::done(self.block(BlockId::Hash(hash.into()), include_txs)).boxed() Box::new(future::done(self.block(BlockId::Hash(hash.into()), include_txs)))
} }
fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> { fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
future::done(self.block(num.into(), include_txs)).boxed() Box::new(future::done(self.block(num.into(), include_txs)))
} }
fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> { fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> {
@ -521,7 +512,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
let logs = limit_logs(logs, filter.limit); let logs = limit_logs(logs, filter.limit);
future::ok(logs).boxed() Box::new(future::ok(logs))
} }
fn work(&self, no_new_work_timeout: Trailing<u64>) -> Result<Work, Error> { fn work(&self, no_new_work_timeout: Trailing<u64>) -> Result<Work, Error> {
@ -615,30 +606,24 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
fn call(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> { fn call(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
let request = CallRequest::into(request); let request = CallRequest::into(request);
let signed = match fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()) { let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()));
Ok(signed) => signed,
Err(e) => return future::err(e).boxed(),
};
let num = num.unwrap_or_default(); let num = num.unwrap_or_default();
let result = self.client.call(&signed, Default::default(), num.into()); let result = self.client.call(&signed, Default::default(), num.into());
future::done(result Box::new(future::done(result
.map(|b| b.output.into()) .map(|b| b.output.into())
.map_err(errors::call) .map_err(errors::call)
).boxed() ))
} }
fn estimate_gas(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> { fn estimate_gas(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
let request = CallRequest::into(request); let request = CallRequest::into(request);
let signed = match fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()) { let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp()));
Ok(signed) => signed, Box::new(future::done(self.client.estimate_gas(&signed, num.unwrap_or_default().into())
Err(e) => return future::err(e).boxed(),
};
future::done(self.client.estimate_gas(&signed, num.unwrap_or_default().into())
.map(Into::into) .map(Into::into)
.map_err(errors::call) .map_err(errors::call)
).boxed() ))
} }
fn compile_lll(&self, _: String) -> Result<Bytes, Error> { fn compile_lll(&self, _: String) -> Result<Bytes, Error> {

View File

@ -19,15 +19,15 @@
use std::sync::Arc; use std::sync::Arc;
use std::collections::HashSet; use std::collections::HashSet;
use jsonrpc_core::*;
use ethcore::miner::MinerService; use ethcore::miner::MinerService;
use ethcore::filter::Filter as EthcoreFilter; use ethcore::filter::Filter as EthcoreFilter;
use ethcore::client::{BlockChainClient, BlockId}; use ethcore::client::{BlockChainClient, BlockId};
use bigint::hash::H256; use bigint::hash::H256;
use parking_lot::Mutex; use parking_lot::Mutex;
use futures::{future, Future, BoxFuture}; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::future::Either;
use v1::traits::EthFilter; use v1::traits::EthFilter;
use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256}; use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256};
use v1::helpers::{PollFilter, PollManager, limit_logs}; use v1::helpers::{PollFilter, PollManager, limit_logs};
@ -89,7 +89,7 @@ impl<C, M> Filterable for EthFilterClient<C, M> where C: BlockChainClient, M: Mi
} }
fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>, Error> { fn logs(&self, filter: EthcoreFilter) -> BoxFuture<Vec<Log>, Error> {
future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()).boxed() Box::new(future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()))
} }
fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec<Log> { fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec<Log> {
@ -125,8 +125,8 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
fn filter_changes(&self, index: Index) -> BoxFuture<FilterChanges, Error> { fn filter_changes(&self, index: Index) -> BoxFuture<FilterChanges, Error> {
let mut polls = self.polls().lock(); let mut polls = self.polls().lock();
match polls.poll_mut(&index.value()) { Box::new(match polls.poll_mut(&index.value()) {
None => future::ok(FilterChanges::Empty).boxed(), None => Either::A(future::ok(FilterChanges::Empty)),
Some(filter) => match *filter { Some(filter) => match *filter {
PollFilter::Block(ref mut block_number) => { PollFilter::Block(ref mut block_number) => {
// + 1, cause we want to return hashes including current block hash. // + 1, cause we want to return hashes including current block hash.
@ -138,7 +138,7 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
*block_number = current_number; *block_number = current_number;
future::ok(FilterChanges::Hashes(hashes)).boxed() Either::A(future::ok(FilterChanges::Hashes(hashes)))
}, },
PollFilter::PendingTransaction(ref mut previous_hashes) => { PollFilter::PendingTransaction(ref mut previous_hashes) => {
// get hashes of pending transactions // get hashes of pending transactions
@ -162,7 +162,7 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
*previous_hashes = current_hashes; *previous_hashes = current_hashes;
// return new hashes // return new hashes
future::ok(FilterChanges::Hashes(new_hashes)).boxed() Either::A(future::ok(FilterChanges::Hashes(new_hashes)))
}, },
PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => {
// retrive the current block number // retrive the current block number
@ -200,14 +200,13 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
// retrieve logs in range from_block..min(BlockId::Latest..to_block) // retrieve logs in range from_block..min(BlockId::Latest..to_block)
let limit = filter.limit; let limit = filter.limit;
self.logs(filter) Either::B(self.logs(filter)
.map(move |mut logs| { logs.extend(pending); logs }) // append fetched pending logs .map(move |mut logs| { logs.extend(pending); logs }) // append fetched pending logs
.map(move |logs| limit_logs(logs, limit)) // limit the logs .map(move |logs| limit_logs(logs, limit)) // limit the logs
.map(FilterChanges::Logs) .map(FilterChanges::Logs))
.boxed()
} }
} }
} })
} }
fn filter_logs(&self, index: Index) -> BoxFuture<Vec<Log>, Error> { fn filter_logs(&self, index: Index) -> BoxFuture<Vec<Log>, Error> {
@ -217,7 +216,7 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
match polls.poll(&index.value()) { match polls.poll(&index.value()) {
Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => filter.clone(), Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => filter.clone(),
// just empty array // just empty array
_ => return future::ok(Vec::new()).boxed(), _ => return Box::new(future::ok(Vec::new())),
} }
}; };
@ -235,11 +234,10 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
// retrieve logs asynchronously, appending pending logs. // retrieve logs asynchronously, appending pending logs.
let limit = filter.limit; let limit = filter.limit;
let logs = self.logs(filter); let logs = self.logs(filter);
let res = logs Box::new(logs
.map(move |mut logs| { logs.extend(pending); logs }) .map(move |mut logs| { logs.extend(pending); logs })
.map(move |logs| limit_logs(logs, limit)) .map(move |logs| limit_logs(logs, limit))
.boxed(); )
res
} }
fn uninstall_filter(&self, index: Index) -> Result<bool, Error> { fn uninstall_filter(&self, index: Index) -> Result<bool, Error> {

View File

@ -19,8 +19,8 @@
use std::sync::Arc; use std::sync::Arc;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use futures::{self, future, BoxFuture, Future}; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::Error; use jsonrpc_core::futures::{self, Future, IntoFuture};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use jsonrpc_macros::pubsub::{Sink, Subscriber}; use jsonrpc_macros::pubsub::{Sink, Subscriber};
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
@ -131,8 +131,10 @@ impl<C> ChainNotificationHandler<C> {
} }
} }
fn notify_logs<F>(&self, enacted: &[H256], logs: F) where fn notify_logs<F, T>(&self, enacted: &[H256], logs: F) where
F: Fn(EthFilter) -> BoxFuture<Vec<Log>, Error>, F: Fn(EthFilter) -> T,
T: IntoFuture<Item = Vec<Log>, Error = Error>,
T::Future: Send + 'static,
{ {
for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() {
let logs = futures::future::join_all(enacted let logs = futures::future::join_all(enacted
@ -141,7 +143,7 @@ impl<C> ChainNotificationHandler<C> {
let mut filter = filter.clone(); let mut filter = filter.clone();
filter.from_block = BlockId::Hash(*hash); filter.from_block = BlockId::Hash(*hash);
filter.to_block = filter.from_block.clone(); filter.to_block = filter.from_block.clone();
logs(filter) logs(filter).into_future()
}) })
.collect::<Vec<_>>() .collect::<Vec<_>>()
); );
@ -224,15 +226,15 @@ impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
// Enacted logs // Enacted logs
self.notify_logs(&enacted, |filter| { self.notify_logs(&enacted, |filter| {
future::ok(self.client.logs(filter).into_iter().map(Into::into).collect()).boxed() Ok(self.client.logs(filter).into_iter().map(Into::into).collect())
}); });
// Retracted logs // Retracted logs
self.notify_logs(&retracted, |filter| { self.notify_logs(&retracted, |filter| {
future::ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| { Ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| {
log.log_type = "removed".into(); log.log_type = "removed".into();
log log
}).collect()).boxed() }).collect())
}); });
} }
} }
@ -270,10 +272,10 @@ impl<C: Send + Sync + 'static> EthPubSub for EthPubSubClient<C> {
let _ = subscriber.reject(error); let _ = subscriber.reject(error);
} }
fn unsubscribe(&self, id: SubscriptionId) -> BoxFuture<bool, Error> { fn unsubscribe(&self, id: SubscriptionId) -> Result<bool, Error> {
let res = self.heads_subscribers.write().remove(&id).is_some(); let res = self.heads_subscribers.write().remove(&id).is_some();
let res2 = self.logs_subscribers.write().remove(&id).is_some(); let res2 = self.logs_subscribers.write().remove(&id).is_some();
future::ok(res || res2).boxed() Ok(res || res2)
} }
} }

View File

@ -16,12 +16,11 @@
//! Eth RPC interface for the light client. //! Eth RPC interface for the light client.
// TODO: remove when complete.
#![allow(unused_imports, unused_variables)]
use std::sync::Arc; use std::sync::Arc;
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::future::Either;
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use light::cache::Cache as LightDataCache; use light::cache::Cache as LightDataCache;
@ -30,25 +29,20 @@ use light::{cht, TransactionQueue};
use light::on_demand::{request, OnDemand}; use light::on_demand::{request, OnDemand};
use ethcore::account_provider::{AccountProvider, DappId}; use ethcore::account_provider::{AccountProvider, DappId};
use ethcore::basic_account::BasicAccount;
use ethcore::encoded; use ethcore::encoded;
use ethcore::executed::{Executed, ExecutionError};
use ethcore::ids::BlockId; use ethcore::ids::BlockId;
use ethcore::filter::Filter as EthcoreFilter; use ethcore::filter::Filter as EthcoreFilter;
use ethcore::transaction::{Action, SignedTransaction, Transaction as EthTransaction}; use ethcore::transaction::SignedTransaction;
use ethsync::LightSync; use ethsync::LightSync;
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP};
use bigint::prelude::U256; use bigint::prelude::U256;
use parking_lot::{RwLock, Mutex}; use parking_lot::{RwLock, Mutex};
use futures::{future, Future, BoxFuture, IntoFuture};
use futures::sync::oneshot;
use v1::impls::eth_filter::Filterable; use v1::impls::eth_filter::Filterable;
use v1::helpers::{CallRequest as CRequest, errors, limit_logs, dispatch}; use v1::helpers::{errors, limit_logs};
use v1::helpers::{PollFilter, PollManager}; use v1::helpers::{PollFilter, PollManager};
use v1::helpers::block_import::is_major_importing;
use v1::helpers::light_fetch::LightFetch; use v1::helpers::light_fetch::LightFetch;
use v1::traits::Eth; use v1::traits::Eth;
use v1::types::{ use v1::types::{
@ -58,8 +52,6 @@ use v1::types::{
}; };
use v1::metadata::Metadata; use v1::metadata::Metadata;
use util::Address;
const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed";
/// Light client `ETH` (and filter) RPC. /// Light client `ETH` (and filter) RPC.
@ -162,10 +154,10 @@ impl<T: LightChainClient + 'static> EthClient<T> {
}; };
// get the block itself. // get the block itself.
self.fetcher().block(id).and_then(move |block| { Box::new(self.fetcher().block(id).and_then(move |block| {
// then fetch the total difficulty (this is much easier after getting the block). // then fetch the total difficulty (this is much easier after getting the block).
match client.score(id) { match client.score(id) {
Some(score) => future::ok(fill_rich(block, Some(score))).boxed(), Some(score) => Either::A(future::ok(fill_rich(block, Some(score)))),
None => { None => {
// make a CHT request to fetch the chain score. // make a CHT request to fetch the chain score.
let req = cht::block_to_cht_number(block.number()) let req = cht::block_to_cht_number(block.number())
@ -181,7 +173,7 @@ impl<T: LightChainClient + 'static> EthClient<T> {
.expect("genesis always stored; qed") .expect("genesis always stored; qed")
.difficulty(); .difficulty();
return future::ok(fill_rich(block, Some(score))).boxed() return Either::A(future::ok(fill_rich(block, Some(score))))
} }
}; };
@ -191,7 +183,7 @@ impl<T: LightChainClient + 'static> EthClient<T> {
// - we get a score, and our hash is canonical. // - we get a score, and our hash is canonical.
let maybe_fut = sync.with_context(move |ctx| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS)); let maybe_fut = sync.with_context(move |ctx| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS));
match maybe_fut { match maybe_fut {
Some(fut) => fut Some(fut) => Either::B(fut
.map(move |(hash, score)| { .map(move |(hash, score)| {
let score = if hash == block.hash() { let score = if hash == block.hash() {
Some(score) Some(score)
@ -199,13 +191,13 @@ impl<T: LightChainClient + 'static> EthClient<T> {
None None
}; };
fill_rich(block, score) fill_rich(block, score)
}).map_err(errors::on_demand_cancel).boxed(), }).map_err(errors::on_demand_cancel)),
None => return future::err(errors::network_disabled()).boxed(), None => Either::A(future::err(errors::network_disabled())),
} }
} }
} }
}).boxed() }))
} }
} }
@ -235,8 +227,8 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
} }
} }
fn author(&self, _meta: Self::Metadata) -> BoxFuture<RpcH160, Error> { fn author(&self, _meta: Self::Metadata) -> Result<RpcH160, Error> {
future::ok(Default::default()).boxed() Ok(Default::default())
} }
fn is_mining(&self) -> Result<bool, Error> { fn is_mining(&self) -> Result<bool, Error> {
@ -254,16 +246,14 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
.unwrap_or_else(Default::default)) .unwrap_or_else(Default::default))
} }
fn accounts(&self, meta: Metadata) -> BoxFuture<Vec<RpcH160>, Error> { fn accounts(&self, meta: Metadata) -> Result<Vec<RpcH160>, Error> {
let dapp: DappId = meta.dapp_id().into(); let dapp: DappId = meta.dapp_id().into();
let accounts = self.accounts self.accounts
.note_dapp_used(dapp.clone()) .note_dapp_used(dapp.clone())
.and_then(|_| self.accounts.dapp_addresses(dapp)) .and_then(|_| self.accounts.dapp_addresses(dapp))
.map_err(|e| errors::account("Could not fetch accounts.", e)) .map_err(|e| errors::account("Could not fetch accounts.", e))
.map(|accs| accs.into_iter().map(Into::<RpcH160>::into).collect()); .map(|accs| accs.into_iter().map(Into::<RpcH160>::into).collect())
future::done(accounts).boxed()
} }
fn block_number(&self) -> Result<RpcU256, Error> { fn block_number(&self) -> Result<RpcU256, Error> {
@ -271,93 +261,93 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
} }
fn balance(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> { fn balance(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
self.fetcher().account(address.into(), num.unwrap_or_default().into()) Box::new(self.fetcher().account(address.into(), num.unwrap_or_default().into())
.map(|acc| acc.map_or(0.into(), |a| a.balance).into()).boxed() .map(|acc| acc.map_or(0.into(), |a| a.balance).into()))
} }
fn storage_at(&self, _address: RpcH160, _key: RpcU256, _num: Trailing<BlockNumber>) -> BoxFuture<RpcH256, Error> { fn storage_at(&self, _address: RpcH160, _key: RpcU256, _num: Trailing<BlockNumber>) -> BoxFuture<RpcH256, Error> {
future::err(errors::unimplemented(None)).boxed() Box::new(future::err(errors::unimplemented(None)))
} }
fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> { fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some).boxed() Box::new(self.rich_block(BlockId::Hash(hash.into()), include_txs).map(Some))
} }
fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> { fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>, Error> {
self.rich_block(num.into(), include_txs).map(Some).boxed() Box::new(self.rich_block(num.into(), include_txs).map(Some))
} }
fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> { fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
self.fetcher().account(address.into(), num.unwrap_or_default().into()) Box::new(self.fetcher().account(address.into(), num.unwrap_or_default().into())
.map(|acc| acc.map_or(0.into(), |a| a.nonce).into()).boxed() .map(|acc| acc.map_or(0.into(), |a| a.nonce).into()))
} }
fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> { fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> {
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { Box::new(self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| {
if hdr.transactions_root() == KECCAK_NULL_RLP { if hdr.transactions_root() == KECCAK_NULL_RLP {
future::ok(Some(U256::from(0).into())).boxed() Either::A(future::ok(Some(U256::from(0).into())))
} else { } else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into()))) .map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into())))
.map(|x| x.map_err(errors::on_demand_cancel).boxed()) .map(|x| Either::B(x.map_err(errors::on_demand_cancel)))
.unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) .unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
} }
}).boxed() }))
} }
fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> { fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> {
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
self.fetcher().header(num.into()).and_then(move |hdr| { Box::new(self.fetcher().header(num.into()).and_then(move |hdr| {
if hdr.transactions_root() == KECCAK_NULL_RLP { if hdr.transactions_root() == KECCAK_NULL_RLP {
future::ok(Some(U256::from(0).into())).boxed() Either::A(future::ok(Some(U256::from(0).into())))
} else { } else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into()))) .map(|x| x.map(|b| Some(U256::from(b.transactions_count()).into())))
.map(|x| x.map_err(errors::on_demand_cancel).boxed()) .map(|x| Either::B(x.map_err(errors::on_demand_cancel)))
.unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) .unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
} }
}).boxed() }))
} }
fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> { fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>, Error> {
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| { Box::new(self.fetcher().header(BlockId::Hash(hash.into())).and_then(move |hdr| {
if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP {
future::ok(Some(U256::from(0).into())).boxed() Either::A(future::ok(Some(U256::from(0).into())))
} else { } else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into()))) .map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into())))
.map(|x| x.map_err(errors::on_demand_cancel).boxed()) .map(|x| Either::B(x.map_err(errors::on_demand_cancel)))
.unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) .unwrap_or_else(|| Either::A(future::err(errors::network_disabled())))
} }
}).boxed() }))
} }
fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> { fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>, Error> {
let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone()); let (sync, on_demand) = (self.sync.clone(), self.on_demand.clone());
self.fetcher().header(num.into()).and_then(move |hdr| { Box::new(self.fetcher().header(num.into()).and_then(move |hdr| {
if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP { if hdr.uncles_hash() == KECCAK_EMPTY_LIST_RLP {
future::ok(Some(U256::from(0).into())).boxed() Either::B(future::ok(Some(U256::from(0).into())))
} else { } else {
sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into()))) sync.with_context(|ctx| on_demand.request(ctx, request::Body(hdr.into())))
.map(|x| x.expect(NO_INVALID_BACK_REFS)) .map(|x| x.expect(NO_INVALID_BACK_REFS))
.map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into()))) .map(|x| x.map(|b| Some(U256::from(b.uncles_count()).into())))
.map(|x| x.map_err(errors::on_demand_cancel).boxed()) .map(|x| Either::A(x.map_err(errors::on_demand_cancel)))
.unwrap_or_else(|| future::err(errors::network_disabled()).boxed()) .unwrap_or_else(|| Either::B(future::err(errors::network_disabled())))
} }
}).boxed() }))
} }
fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> { fn code_at(&self, _address: RpcH160, _num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
future::err(errors::unimplemented(None)).boxed() Box::new(future::err(errors::unimplemented(None)))
} }
fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256, Error> { fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256, Error> {
@ -385,45 +375,45 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
} }
fn call(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> { fn call(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes, Error> {
self.fetcher().proved_execution(req, num).and_then(|res| { Box::new(self.fetcher().proved_execution(req, num).and_then(|res| {
match res { match res {
Ok(exec) => Ok(exec.output.into()), Ok(exec) => Ok(exec.output.into()),
Err(e) => Err(errors::execution(e)), Err(e) => Err(errors::execution(e)),
} }
}).boxed() }))
} }
fn estimate_gas(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> { fn estimate_gas(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256, Error> {
// TODO: binary chop for more accurate estimates. // TODO: binary chop for more accurate estimates.
self.fetcher().proved_execution(req, num).and_then(|res| { Box::new(self.fetcher().proved_execution(req, num).and_then(|res| {
match res { match res {
Ok(exec) => Ok((exec.refunded + exec.gas_used).into()), Ok(exec) => Ok((exec.refunded + exec.gas_used).into()),
Err(e) => Err(errors::execution(e)), Err(e) => Err(errors::execution(e)),
} }
}).boxed() }))
} }
fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> { fn transaction_by_hash(&self, _hash: RpcH256) -> Result<Option<Transaction>, Error> {
Err(errors::unimplemented(None)) Err(errors::unimplemented(None))
} }
fn transaction_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> Result<Option<Transaction>, Error> { fn transaction_by_block_hash_and_index(&self, _hash: RpcH256, _idx: Index) -> Result<Option<Transaction>, Error> {
Err(errors::unimplemented(None)) Err(errors::unimplemented(None))
} }
fn transaction_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> Result<Option<Transaction>, Error> { fn transaction_by_block_number_and_index(&self, _num: BlockNumber, _idx: Index) -> Result<Option<Transaction>, Error> {
Err(errors::unimplemented(None)) Err(errors::unimplemented(None))
} }
fn transaction_receipt(&self, hash: RpcH256) -> Result<Option<Receipt>, Error> { fn transaction_receipt(&self, _hash: RpcH256) -> Result<Option<Receipt>, Error> {
Err(errors::unimplemented(None)) Err(errors::unimplemented(None))
} }
fn uncle_by_block_hash_and_index(&self, hash: RpcH256, idx: Index) -> Result<Option<RichBlock>, Error> { fn uncle_by_block_hash_and_index(&self, _hash: RpcH256, _idx: Index) -> Result<Option<RichBlock>, Error> {
Err(errors::unimplemented(None)) Err(errors::unimplemented(None))
} }
fn uncle_by_block_number_and_index(&self, num: BlockNumber, idx: Index) -> Result<Option<RichBlock>, Error> { fn uncle_by_block_number_and_index(&self, _num: BlockNumber, _idx: Index) -> Result<Option<RichBlock>, Error> {
Err(errors::unimplemented(None)) Err(errors::unimplemented(None))
} }
@ -447,9 +437,8 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>, Error> { fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>, Error> {
let limit = filter.limit; let limit = filter.limit;
Filterable::logs(self, filter.into()) Box::new(Filterable::logs(self, filter.into())
.map(move|logs| limit_logs(logs, limit)) .map(move|logs| limit_logs(logs, limit)))
.boxed()
} }
fn work(&self, _timeout: Trailing<u64>) -> Result<Work, Error> { fn work(&self, _timeout: Trailing<u64>) -> Result<Work, Error> {

View File

@ -17,7 +17,6 @@
//! Parity-specific rpc implementation. //! Parity-specific rpc implementation.
use std::sync::Arc; use std::sync::Arc;
use std::collections::{BTreeMap, HashSet}; use std::collections::{BTreeMap, HashSet};
use futures::{future, Future, BoxFuture};
use util::misc::version_data; use util::misc::version_data;
@ -31,7 +30,8 @@ use node_health::{NodeHealth, Health};
use light::client::LightChainClient; use light::client::LightChainClient;
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::Future;
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings};
use v1::helpers::dispatch::LightDispatcher; use v1::helpers::dispatch::LightDispatcher;
@ -140,15 +140,14 @@ impl Parity for ParityClient {
Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?) Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?)
} }
fn default_account(&self, meta: Self::Metadata) -> BoxFuture<H160, Error> { fn default_account(&self, meta: Self::Metadata) -> Result<H160, Error> {
let dapp_id = meta.dapp_id(); let dapp_id = meta.dapp_id();
future::ok(self.accounts Ok(self.accounts
.dapp_addresses(dapp_id.into()) .dapp_addresses(dapp_id.into())
.ok() .ok()
.and_then(|accounts| accounts.get(0).cloned()) .and_then(|accounts| accounts.get(0).cloned())
.map(|acc| acc.into()) .map(|acc| acc.into())
.unwrap_or_default() .unwrap_or_default())
).boxed()
} }
fn transactions_limit(&self) -> Result<usize, Error> { fn transactions_limit(&self) -> Result<usize, Error> {
@ -221,10 +220,9 @@ impl Parity for ParityClient {
} }
fn gas_price_histogram(&self) -> BoxFuture<Histogram, Error> { fn gas_price_histogram(&self) -> BoxFuture<Histogram, Error> {
self.light_dispatch.gas_price_corpus() Box::new(self.light_dispatch.gas_price_corpus()
.and_then(|corpus| corpus.histogram(10).ok_or_else(errors::not_enough_data)) .and_then(|corpus| corpus.histogram(10).ok_or_else(errors::not_enough_data))
.map(Into::into) .map(Into::into))
.boxed()
} }
fn unsigned_transactions_count(&self) -> Result<usize, Error> { fn unsigned_transactions_count(&self) -> Result<usize, Error> {
@ -316,7 +314,7 @@ impl Parity for ParityClient {
} }
fn next_nonce(&self, address: H160) -> BoxFuture<U256, Error> { fn next_nonce(&self, address: H160) -> BoxFuture<U256, Error> {
self.light_dispatch.next_nonce(address.into()).map(Into::into).boxed() Box::new(self.light_dispatch.next_nonce(address.into()).map(Into::into))
} }
fn mode(&self) -> Result<String, Error> { fn mode(&self) -> Result<String, Error> {
@ -398,20 +396,19 @@ impl Parity for ParityClient {
} }
}; };
self.fetcher().header(number.unwrap_or_default().into()).map(from_encoded).boxed() Box::new(self.fetcher().header(number.unwrap_or_default().into()).map(from_encoded))
} }
fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> { fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> {
ipfs::cid(content) ipfs::cid(content)
} }
fn call(&self, _meta: Self::Metadata, _requests: Vec<CallRequest>, _block: Trailing<BlockNumber>) -> BoxFuture<Vec<Bytes>, Error> { fn call(&self, _meta: Self::Metadata, _requests: Vec<CallRequest>, _block: Trailing<BlockNumber>) -> Result<Vec<Bytes>, Error> {
future::err(errors::light_unimplemented(None)).boxed() Err(errors::light_unimplemented(None))
} }
fn node_health(&self) -> BoxFuture<Health, Error> { fn node_health(&self) -> BoxFuture<Health, Error> {
self.health.health() Box::new(self.health.health()
.map_err(|err| errors::internal("Health API failure.", err)) .map_err(|err| errors::internal("Health API failure.", err)))
.boxed()
} }
} }

View File

@ -22,10 +22,10 @@ use std::sync::Arc;
use ethsync::ManageNetwork; use ethsync::ManageNetwork;
use fetch::Fetch; use fetch::Fetch;
use futures::{BoxFuture, Future};
use hash::keccak_buffer; use hash::keccak_buffer;
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::Future;
use v1::helpers::dapps::DappsService; use v1::helpers::dapps::DappsService;
use v1::helpers::errors; use v1::helpers::errors;
use v1::traits::ParitySet; use v1::traits::ParitySet;

View File

@ -17,7 +17,6 @@
//! Traces api implementation. //! Traces api implementation.
use jsonrpc_core::Error; use jsonrpc_core::Error;
use jsonrpc_core::futures::{future, Future, BoxFuture};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use v1::Metadata; use v1::Metadata;
use v1::traits::Traces; use v1::traits::Traces;
@ -47,12 +46,12 @@ impl Traces for TracesClient {
Err(errors::light_unimplemented(None)) Err(errors::light_unimplemented(None))
} }
fn call(&self, _meta: Self::Metadata, _request: CallRequest, _flags: TraceOptions, _block: Trailing<BlockNumber>) -> BoxFuture<TraceResults, Error> { fn call(&self, _meta: Self::Metadata, _request: CallRequest, _flags: TraceOptions, _block: Trailing<BlockNumber>) -> Result<TraceResults, Error> {
future::err(errors::light_unimplemented(None)).boxed() Err(errors::light_unimplemented(None))
} }
fn call_many(&self, _meta: Self::Metadata, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing<BlockNumber>) -> BoxFuture<Vec<TraceResults>, Error> { fn call_many(&self, _meta: Self::Metadata, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing<BlockNumber>) -> Result<Vec<TraceResults>, Error> {
future::err(errors::light_unimplemented(None)).boxed() Err(errors::light_unimplemented(None))
} }
fn raw_transaction(&self, _raw_transaction: Bytes, _flags: TraceOptions, _block: Trailing<BlockNumber>) -> Result<TraceResults, Error> { fn raw_transaction(&self, _raw_transaction: Bytes, _flags: TraceOptions, _block: Trailing<BlockNumber>) -> Result<TraceResults, Error> {

View File

@ -18,7 +18,6 @@
use std::sync::Arc; use std::sync::Arc;
use std::str::FromStr; use std::str::FromStr;
use std::collections::{BTreeMap, HashSet}; use std::collections::{BTreeMap, HashSet};
use futures::{future, Future, BoxFuture};
use util::Address; use util::Address;
use util::misc::version_data; use util::misc::version_data;
@ -32,12 +31,12 @@ use ethcore::client::{MiningBlockChainClient};
use ethcore::ids::BlockId; use ethcore::ids::BlockId;
use ethcore::miner::MinerService; use ethcore::miner::MinerService;
use ethcore::mode::Mode; use ethcore::mode::Mode;
use ethcore::transaction::SignedTransaction;
use ethcore_logger::RotatingLogger; use ethcore_logger::RotatingLogger;
use node_health::{NodeHealth, Health}; use node_health::{NodeHealth, Health};
use updater::{Service as UpdateService}; use updater::{Service as UpdateService};
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::{future, Future};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use v1::helpers::{self, errors, fake_sign, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::{self, errors, fake_sign, ipfs, SigningQueue, SignerService, NetworkSettings};
use v1::helpers::accounts::unwrap_provider; use v1::helpers::accounts::unwrap_provider;
@ -157,15 +156,14 @@ impl<C, M, U> Parity for ParityClient<C, M, U> where
Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?) Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?)
} }
fn default_account(&self, meta: Self::Metadata) -> BoxFuture<H160, Error> { fn default_account(&self, meta: Self::Metadata) -> Result<H160, Error> {
let dapp_id = meta.dapp_id(); let dapp_id = meta.dapp_id();
future::ok(
try_bf!(self.account_provider()) Ok(self.account_provider()?
.dapp_default_address(dapp_id.into()) .dapp_default_address(dapp_id.into())
.map(Into::into) .map(Into::into)
.ok() .ok()
.unwrap_or_default() .unwrap_or_default())
).boxed()
} }
fn transactions_limit(&self) -> Result<usize, Error> { fn transactions_limit(&self) -> Result<usize, Error> {
@ -253,12 +251,12 @@ impl<C, M, U> Parity for ParityClient<C, M, U> where
} }
fn gas_price_histogram(&self) -> BoxFuture<Histogram, Error> { fn gas_price_histogram(&self) -> BoxFuture<Histogram, Error> {
future::done(self.client Box::new(future::done(self.client
.gas_price_corpus(100) .gas_price_corpus(100)
.histogram(10) .histogram(10)
.ok_or_else(errors::not_enough_data) .ok_or_else(errors::not_enough_data)
.map(Into::into) .map(Into::into)
).boxed() ))
} }
fn unsigned_transactions_count(&self) -> Result<usize, Error> { fn unsigned_transactions_count(&self) -> Result<usize, Error> {
@ -340,11 +338,11 @@ impl<C, M, U> Parity for ParityClient<C, M, U> where
fn next_nonce(&self, address: H160) -> BoxFuture<U256, Error> { fn next_nonce(&self, address: H160) -> BoxFuture<U256, Error> {
let address: Address = address.into(); let address: Address = address.into();
future::ok(self.miner.last_nonce(&address) Box::new(future::ok(self.miner.last_nonce(&address)
.map(|n| n + 1.into()) .map(|n| n + 1.into())
.unwrap_or_else(|| self.client.latest_nonce(&address)) .unwrap_or_else(|| self.client.latest_nonce(&address))
.into() .into()
).boxed() ))
} }
fn mode(&self) -> Result<String, Error> { fn mode(&self) -> Result<String, Error> {
@ -403,41 +401,37 @@ impl<C, M, U> Parity for ParityClient<C, M, U> where
let id: BlockId = number.unwrap_or_default().into(); let id: BlockId = number.unwrap_or_default().into();
let encoded = match self.client.block_header(id.clone()) { let encoded = match self.client.block_header(id.clone()) {
Some(encoded) => encoded, Some(encoded) => encoded,
None => return future::err(errors::unknown_block()).boxed(), None => return Box::new(future::err(errors::unknown_block())),
}; };
future::ok(RichHeader { Box::new(future::ok(RichHeader {
inner: encoded.into(), inner: encoded.into(),
extra_info: self.client.block_extra_info(id).expect(EXTRA_INFO_PROOF), extra_info: self.client.block_extra_info(id).expect(EXTRA_INFO_PROOF),
}).boxed() }))
} }
fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> { fn ipfs_cid(&self, content: Bytes) -> Result<String, Error> {
ipfs::cid(content) ipfs::cid(content)
} }
fn call(&self, meta: Self::Metadata, requests: Vec<CallRequest>, block: Trailing<BlockNumber>) -> BoxFuture<Vec<Bytes>, Error> { fn call(&self, meta: Self::Metadata, requests: Vec<CallRequest>, block: Trailing<BlockNumber>) -> Result<Vec<Bytes>, Error> {
let requests: Result<Vec<(SignedTransaction, _)>, Error> = requests let requests = requests
.into_iter() .into_iter()
.map(|request| Ok(( .map(|request| Ok((
fake_sign::sign_call(&self.client, &self.miner, request.into(), meta.is_dapp())?, fake_sign::sign_call(&self.client, &self.miner, request.into(), meta.is_dapp())?,
Default::default() Default::default()
))) )))
.collect(); .collect::<Result<Vec<_>, Error>>()?;
let block = block.unwrap_or_default(); let block = block.unwrap_or_default();
let requests = try_bf!(requests);
let result = self.client.call_many(&requests, block.into()) self.client.call_many(&requests, block.into())
.map(|res| res.into_iter().map(|res| res.output.into()).collect()) .map(|res| res.into_iter().map(|res| res.output.into()).collect())
.map_err(errors::call); .map_err(errors::call)
future::done(result).boxed()
} }
fn node_health(&self) -> BoxFuture<Health, Error> { fn node_health(&self) -> BoxFuture<Health, Error> {
self.health.health() Box::new(self.health.health()
.map_err(|err| errors::internal("Health API failure.", err)) .map_err(|err| errors::internal("Health API failure.", err)))
.boxed()
} }
} }

View File

@ -68,7 +68,7 @@ impl ParityAccounts for ParityAccountsClient {
for (address, account) in account_iter { for (address, account) in account_iter {
match accounts.entry(address) { match accounts.entry(address) {
/// Insert only if occupied entry isn't already an account with UUID // Insert only if occupied entry isn't already an account with UUID
Entry::Occupied(ref mut occupied) if occupied.get().uuid.is_none() => { Entry::Occupied(ref mut occupied) if occupied.get().uuid.is_none() => {
occupied.insert(account); occupied.insert(account);
}, },

View File

@ -23,11 +23,11 @@ use ethcore::client::MiningBlockChainClient;
use ethcore::mode::Mode; use ethcore::mode::Mode;
use ethsync::ManageNetwork; use ethsync::ManageNetwork;
use fetch::{self, Fetch}; use fetch::{self, Fetch};
use futures::{BoxFuture, Future};
use hash::keccak_buffer; use hash::keccak_buffer;
use updater::{Service as UpdateService}; use updater::{Service as UpdateService};
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::futures::Future;
use v1::helpers::dapps::DappsService; use v1::helpers::dapps::DappsService;
use v1::helpers::errors; use v1::helpers::errors;
use v1::traits::ParitySet; use v1::traits::ParitySet;

View File

@ -24,8 +24,8 @@ use bigint::prelude::U128;
use util::Address; use util::Address;
use bytes::ToPretty; use bytes::ToPretty;
use futures::{future, Future, BoxFuture}; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::Error; use jsonrpc_core::futures::{future, Future};
use v1::helpers::errors; use v1::helpers::errors;
use v1::helpers::dispatch::{Dispatcher, SignWith}; use v1::helpers::dispatch::{Dispatcher, SignWith};
use v1::helpers::accounts::unwrap_provider; use v1::helpers::accounts::unwrap_provider;
@ -114,10 +114,10 @@ impl<D: Dispatcher + 'static> Personal for PersonalClient<D> {
let default = match default { let default = match default {
Ok(default) => default, Ok(default) => default,
Err(e) => return future::err(e).boxed(), Err(e) => return Box::new(future::err(e)),
}; };
dispatcher.fill_optional_fields(request.into(), default, false) Box::new(dispatcher.fill_optional_fields(request.into(), default, false)
.and_then(move |filled| { .and_then(move |filled| {
let condition = filled.condition.clone().map(Into::into); let condition = filled.condition.clone().map(Into::into);
dispatcher.sign(accounts, filled, SignWith::Password(password)) dispatcher.sign(accounts, filled, SignWith::Password(password))
@ -131,8 +131,7 @@ impl<D: Dispatcher + 'static> Personal for PersonalClient<D> {
::rlp::encode(&*pending_tx).into_vec().pretty(), chain_id); ::rlp::encode(&*pending_tx).into_vec().pretty(), chain_id);
dispatcher.dispatch_transaction(pending_tx).map(Into::into) dispatcher.dispatch_transaction(pending_tx).map(Into::into)
}) }))
.boxed()
} }
fn sign_and_send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<RpcH256, Error> { fn sign_and_send_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<RpcH256, Error> {

View File

@ -20,8 +20,8 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use parking_lot::RwLock; use parking_lot::RwLock;
use futures::{self, BoxFuture, Future, Stream, Sink};
use jsonrpc_core::{self as core, Error, MetaIoHandler}; use jsonrpc_core::{self as core, Error, MetaIoHandler};
use jsonrpc_core::futures::{Future, Stream, Sink};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use jsonrpc_macros::pubsub::Subscriber; use jsonrpc_macros::pubsub::Subscriber;
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
@ -94,8 +94,8 @@ impl<S: core::Middleware<Metadata>> PubSub for PubSubClient<S> {
} }
} }
fn parity_unsubscribe(&self, id: SubscriptionId) -> BoxFuture<bool, Error> { fn parity_unsubscribe(&self, id: SubscriptionId) -> Result<bool, Error> {
let res = self.poll_manager.write().unsubscribe(&id); let res = self.poll_manager.write().unsubscribe(&id);
futures::future::ok(res).boxed() Ok(res)
} }
} }

View File

@ -21,12 +21,13 @@ use std::sync::Arc;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use ethcore::transaction::{SignedTransaction, PendingTransaction}; use ethcore::transaction::{SignedTransaction, PendingTransaction};
use ethkey; use ethkey;
use futures::{future, BoxFuture, Future, IntoFuture};
use parity_reactor::Remote; use parity_reactor::Remote;
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use parking_lot::Mutex; use parking_lot::Mutex;
use jsonrpc_core::{futures, Error}; use jsonrpc_core::{Error, BoxFuture};
use jsonrpc_core::futures::{future, Future, IntoFuture};
use jsonrpc_core::futures::future::Either;
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
use jsonrpc_macros::pubsub::{Sink, Subscriber}; use jsonrpc_macros::pubsub::{Sink, Subscriber};
use v1::helpers::accounts::unwrap_provider; use v1::helpers::accounts::unwrap_provider;
@ -87,18 +88,11 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
T::Future: Send + 'static T::Future: Send + 'static
{ {
let id = id.into(); let id = id.into();
let accounts = try_bf!(self.account_provider());
let dispatcher = self.dispatcher.clone(); let dispatcher = self.dispatcher.clone();
let signer = self.signer.clone();
let setup = || { Box::new(signer.peek(&id).map(|confirmation| {
Ok((self.account_provider()?, self.signer.clone()))
};
let (accounts, signer) = match setup() {
Ok(x) => x,
Err(e) => return future::err(e).boxed(),
};
signer.peek(&id).map(|confirmation| {
let mut payload = confirmation.payload.clone(); let mut payload = confirmation.payload.clone();
// Modify payload // Modify payload
if let ConfirmationPayload::SendTransaction(ref mut request) = payload { if let ConfirmationPayload::SendTransaction(ref mut request) = payload {
@ -118,16 +112,16 @@ impl<D: Dispatcher + 'static> SignerClient<D> {
} }
} }
let fut = f(dispatcher, accounts, payload); let fut = f(dispatcher, accounts, payload);
fut.into_future().then(move |result| { Either::A(fut.into_future().then(move |result| {
// Execute // Execute
if let Ok(ref response) = result { if let Ok(ref response) = result {
signer.request_confirmed(id, Ok((*response).clone())); signer.request_confirmed(id, Ok((*response).clone()));
} }
result result
}).boxed() }))
}) })
.unwrap_or_else(|| future::err(errors::invalid_params("Unknown RequestID", id)).boxed()) .unwrap_or_else(|| Either::B(future::err(errors::invalid_params("Unknown RequestID", id)))))
} }
fn verify_transaction<F>(bytes: Bytes, request: FilledTransactionRequest, process: F) -> Result<ConfirmationResponse, Error> where fn verify_transaction<F>(bytes: Bytes, request: FilledTransactionRequest, process: F) -> Result<ConfirmationResponse, Error> where
@ -178,15 +172,15 @@ impl<D: Dispatcher + 'static> Signer for SignerClient<D> {
fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String)
-> BoxFuture<ConfirmationResponse, Error> -> BoxFuture<ConfirmationResponse, Error>
{ {
self.confirm_internal(id, modification, move |dis, accounts, payload| { Box::new(self.confirm_internal(id, modification, move |dis, accounts, payload| {
dispatch::execute(dis, accounts, payload, dispatch::SignWith::Password(pass)) dispatch::execute(dis, accounts, payload, dispatch::SignWith::Password(pass))
}).map(|v| v.into_value()).boxed() }).map(|v| v.into_value()))
} }
fn confirm_request_with_token(&self, id: U256, modification: TransactionModification, token: String) fn confirm_request_with_token(&self, id: U256, modification: TransactionModification, token: String)
-> BoxFuture<ConfirmationResponseWithToken, Error> -> BoxFuture<ConfirmationResponseWithToken, Error>
{ {
self.confirm_internal(id, modification, move |dis, accounts, payload| { Box::new(self.confirm_internal(id, modification, move |dis, accounts, payload| {
dispatch::execute(dis, accounts, payload, dispatch::SignWith::Token(token)) dispatch::execute(dis, accounts, payload, dispatch::SignWith::Token(token))
}).and_then(|v| match v { }).and_then(|v| match v {
WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")), WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")),
@ -194,7 +188,7 @@ impl<D: Dispatcher + 'static> Signer for SignerClient<D> {
result: response, result: response,
token: token, token: token,
}), }),
}).boxed() }))
} }
fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result<ConfirmationResponse, Error> { fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result<ConfirmationResponse, Error> {
@ -253,8 +247,8 @@ impl<D: Dispatcher + 'static> Signer for SignerClient<D> {
self.subscribers.lock().push(sub) self.subscribers.lock().push(sub)
} }
fn unsubscribe_pending(&self, id: SubscriptionId) -> BoxFuture<bool, Error> { fn unsubscribe_pending(&self, id: SubscriptionId) -> Result<bool, Error> {
let res = self.subscribers.lock().remove(&id).is_some(); let res = self.subscribers.lock().remove(&id).is_some();
futures::future::ok(res).boxed() Ok(res)
} }
} }

View File

@ -23,8 +23,9 @@ use parking_lot::Mutex;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use futures::{future, BoxFuture, Future}; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::Error; use jsonrpc_core::futures::{future, Future};
use jsonrpc_core::futures::future::Either;
use v1::helpers::{ use v1::helpers::{
errors, oneshot, errors, oneshot,
DefaultAccount, DefaultAccount,
@ -115,23 +116,21 @@ impl<D: Dispatcher + 'static> SigningQueueClient<D> {
let dispatcher = self.dispatcher.clone(); let dispatcher = self.dispatcher.clone();
let signer = self.signer.clone(); let signer = self.signer.clone();
dispatch::from_rpc(payload, default_account, &dispatcher) Box::new(dispatch::from_rpc(payload, default_account, &dispatcher)
.and_then(move |payload| { .and_then(move |payload| {
let sender = payload.sender(); let sender = payload.sender();
if accounts.is_unlocked(sender) { if accounts.is_unlocked(sender) {
dispatch::execute(dispatcher, accounts, payload, dispatch::SignWith::Nothing) Either::A(dispatch::execute(dispatcher, accounts, payload, dispatch::SignWith::Nothing)
.map(|v| v.into_value()) .map(|v| v.into_value())
.map(DispatchResult::Value) .map(DispatchResult::Value))
.boxed()
} else { } else {
future::done( Either::B(future::done(
signer.add_request(payload, origin) signer.add_request(payload, origin)
.map(DispatchResult::Promise) .map(DispatchResult::Promise)
.map_err(|_| errors::request_rejected_limit()) .map_err(|_| errors::request_rejected_limit())
).boxed() ))
} }
}) }))
.boxed()
} }
} }
@ -141,12 +140,12 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture<RpcTransactionRequest, Error> { fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture<RpcTransactionRequest, Error> {
let accounts = try_bf!(self.account_provider()); let accounts = try_bf!(self.account_provider());
let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default(); let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default();
self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into).boxed() Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into))
} }
fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> { fn post_sign(&self, meta: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
let pending = self.pending.clone(); let pending = self.pending.clone();
self.dispatch( Box::new(self.dispatch(
RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()),
DefaultAccount::Provided(address.into()), DefaultAccount::Provided(address.into()),
meta.origin meta.origin
@ -160,13 +159,12 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
RpcEither::Either(id.into()) RpcEither::Either(id.into())
}, },
}) }))
.boxed()
} }
fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> { fn post_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
let pending = self.pending.clone(); let pending = self.pending.clone();
self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin) Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin)
.map(move |result| match result { .map(move |result| match result {
DispatchResult::Value(v) => RpcEither::Or(v), DispatchResult::Value(v) => RpcEither::Or(v),
DispatchResult::Promise(promise) => { DispatchResult::Promise(promise) => {
@ -177,8 +175,7 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
RpcEither::Either(id.into()) RpcEither::Either(id.into())
}, },
}) }))
.boxed()
} }
fn check_request(&self, id: RpcU256) -> Result<Option<RpcConfirmationResponse>, Error> { fn check_request(&self, id: RpcU256) -> Result<Option<RpcConfirmationResponse>, Error> {
@ -203,7 +200,7 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
let (ready, p) = oneshot::oneshot(); let (ready, p) = oneshot::oneshot();
// when dispatch is complete // when dispatch is complete
res.then(move |res| { Box::new(res.then(move |res| {
// register callback via the oneshot sender. // register callback via the oneshot sender.
handle_dispatch(res, move |response| { handle_dispatch(res, move |response| {
match response { match response {
@ -214,7 +211,7 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningQueueClient<D> {
}); });
p p
}).boxed() }))
} }
} }
@ -230,7 +227,7 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
let (ready, p) = oneshot::oneshot(); let (ready, p) = oneshot::oneshot();
res.then(move |res| { Box::new(res.then(move |res| {
handle_dispatch(res, move |response| { handle_dispatch(res, move |response| {
match response { match response {
Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)), Ok(RpcConfirmationResponse::Signature(sig)) => ready.send(Ok(sig)),
@ -240,7 +237,7 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
}); });
p p
}).boxed() }))
} }
fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcH256, Error> { fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcH256, Error> {
@ -252,7 +249,7 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
let (ready, p) = oneshot::oneshot(); let (ready, p) = oneshot::oneshot();
res.then(move |res| { Box::new(res.then(move |res| {
handle_dispatch(res, move |response| { handle_dispatch(res, move |response| {
match response { match response {
Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)), Ok(RpcConfirmationResponse::SendTransaction(hash)) => ready.send(Ok(hash)),
@ -262,7 +259,7 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
}); });
p p
}).boxed() }))
} }
fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcRichRawTransaction, Error> { fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcRichRawTransaction, Error> {
@ -274,7 +271,7 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
let (ready, p) = oneshot::oneshot(); let (ready, p) = oneshot::oneshot();
res.then(move |res| { Box::new(res.then(move |res| {
handle_dispatch(res, move |response| { handle_dispatch(res, move |response| {
match response { match response {
Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)), Ok(RpcConfirmationResponse::SignTransaction(tx)) => ready.send(Ok(tx)),
@ -284,6 +281,6 @@ impl<D: Dispatcher + 'static> EthSigning for SigningQueueClient<D> {
}); });
p p
}).boxed() }))
} }
} }

View File

@ -20,8 +20,8 @@ use std::sync::Arc;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;
use futures::{future, BoxFuture, Future}; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_core::Error; use jsonrpc_core::futures::{future, Future};
use v1::helpers::{errors, DefaultAccount}; use v1::helpers::{errors, DefaultAccount};
use v1::helpers::dispatch::{self, Dispatcher}; use v1::helpers::dispatch::{self, Dispatcher};
use v1::helpers::accounts::unwrap_provider; use v1::helpers::accounts::unwrap_provider;
@ -64,12 +64,11 @@ impl<D: Dispatcher + 'static> SigningUnsafeClient<D> {
}; };
let dis = self.dispatcher.clone(); let dis = self.dispatcher.clone();
dispatch::from_rpc(payload, default, &dis) Box::new(dispatch::from_rpc(payload, default, &dis)
.and_then(move |payload| { .and_then(move |payload| {
dispatch::execute(dis, accounts, payload, dispatch::SignWith::Nothing) dispatch::execute(dis, accounts, payload, dispatch::SignWith::Nothing)
}) })
.map(|v| v.into_value()) .map(|v| v.into_value()))
.boxed()
} }
} }
@ -78,33 +77,30 @@ impl<D: Dispatcher + 'static> EthSigning for SigningUnsafeClient<D>
type Metadata = Metadata; type Metadata = Metadata;
fn sign(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcH520, Error> { fn sign(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcH520, Error> {
self.handle(RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), address.into()) Box::new(self.handle(RpcConfirmationPayload::EthSignMessage((address.clone(), data).into()), address.into())
.then(|res| match res { .then(|res| match res {
Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature), Ok(RpcConfirmationResponse::Signature(signature)) => Ok(signature),
Err(e) => Err(e), Err(e) => Err(e),
e => Err(errors::internal("Unexpected result", e)), e => Err(errors::internal("Unexpected result", e)),
}) }))
.boxed()
} }
fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcH256, Error> { fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcH256, Error> {
self.handle(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into()) Box::new(self.handle(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into())
.then(|res| match res { .then(|res| match res {
Ok(RpcConfirmationResponse::SendTransaction(hash)) => Ok(hash), Ok(RpcConfirmationResponse::SendTransaction(hash)) => Ok(hash),
Err(e) => Err(e), Err(e) => Err(e),
e => Err(errors::internal("Unexpected result", e)), e => Err(errors::internal("Unexpected result", e)),
}) }))
.boxed()
} }
fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcRichRawTransaction, Error> { fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture<RpcRichRawTransaction, Error> {
self.handle(RpcConfirmationPayload::SignTransaction(request), meta.dapp_id().into()) Box::new(self.handle(RpcConfirmationPayload::SignTransaction(request), meta.dapp_id().into())
.then(|res| match res { .then(|res| match res {
Ok(RpcConfirmationResponse::SignTransaction(tx)) => Ok(tx), Ok(RpcConfirmationResponse::SignTransaction(tx)) => Ok(tx),
Err(e) => Err(e), Err(e) => Err(e),
e => Err(errors::internal("Unexpected result", e)), e => Err(errors::internal("Unexpected result", e)),
}) }))
.boxed()
} }
} }
@ -114,27 +110,26 @@ impl<D: Dispatcher + 'static> ParitySigning for SigningUnsafeClient<D> {
fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture<RpcTransactionRequest, Error> { fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture<RpcTransactionRequest, Error> {
let accounts = try_bf!(self.account_provider()); let accounts = try_bf!(self.account_provider());
let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default(); let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default();
self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into).boxed() Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into))
} }
fn decrypt_message(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcBytes, Error> { fn decrypt_message(&self, _: Metadata, address: RpcH160, data: RpcBytes) -> BoxFuture<RpcBytes, Error> {
self.handle(RpcConfirmationPayload::Decrypt((address.clone(), data).into()), address.into()) Box::new(self.handle(RpcConfirmationPayload::Decrypt((address.clone(), data).into()), address.into())
.then(|res| match res { .then(|res| match res {
Ok(RpcConfirmationResponse::Decrypt(data)) => Ok(data), Ok(RpcConfirmationResponse::Decrypt(data)) => Ok(data),
Err(e) => Err(e), Err(e) => Err(e),
e => Err(errors::internal("Unexpected result", e)), e => Err(errors::internal("Unexpected result", e)),
}) }))
.boxed()
} }
fn post_sign(&self, _: Metadata, _: RpcH160, _: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> { fn post_sign(&self, _: Metadata, _: RpcH160, _: RpcBytes) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
// We don't support this in non-signer mode. // We don't support this in non-signer mode.
future::err(errors::signer_disabled()).boxed() Box::new(future::err(errors::signer_disabled()))
} }
fn post_transaction(&self, _: Metadata, _: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> { fn post_transaction(&self, _: Metadata, _: RpcTransactionRequest) -> BoxFuture<RpcEither<RpcU256, RpcConfirmationResponse>, Error> {
// We don't support this in non-signer mode. // We don't support this in non-signer mode.
future::err((errors::signer_disabled())).boxed() Box::new(future::err((errors::signer_disabled())))
} }
fn check_request(&self, _: RpcU256) -> Result<Option<RpcConfirmationResponse>, Error> { fn check_request(&self, _: RpcU256) -> Result<Option<RpcConfirmationResponse>, Error> {

View File

@ -24,7 +24,6 @@ use ethcore::transaction::SignedTransaction;
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use jsonrpc_core::Error; use jsonrpc_core::Error;
use jsonrpc_core::futures::{self, Future, BoxFuture};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use v1::Metadata; use v1::Metadata;
use v1::traits::Traces; use v1::traits::Traces;
@ -83,35 +82,31 @@ impl<C, M> Traces for TracesClient<C, M> where C: MiningBlockChainClient + 'stat
.map(LocalizedTrace::from)) .map(LocalizedTrace::from))
} }
fn call(&self, meta: Self::Metadata, request: CallRequest, flags: TraceOptions, block: Trailing<BlockNumber>) -> BoxFuture<TraceResults, Error> { fn call(&self, meta: Self::Metadata, request: CallRequest, flags: TraceOptions, block: Trailing<BlockNumber>) -> Result<TraceResults, Error> {
let block = block.unwrap_or_default(); let block = block.unwrap_or_default();
let request = CallRequest::into(request); let request = CallRequest::into(request);
let signed = try_bf!(fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())); let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?;
let res = self.client.call(&signed, to_call_analytics(flags), block.into()) self.client.call(&signed, to_call_analytics(flags), block.into())
.map(TraceResults::from) .map(TraceResults::from)
.map_err(errors::call); .map_err(errors::call)
futures::done(res).boxed()
} }
fn call_many(&self, meta: Self::Metadata, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing<BlockNumber>) -> BoxFuture<Vec<TraceResults>, Error> { fn call_many(&self, meta: Self::Metadata, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing<BlockNumber>) -> Result<Vec<TraceResults>, Error> {
let block = block.unwrap_or_default(); let block = block.unwrap_or_default();
let requests = try_bf!(requests.into_iter() let requests = requests.into_iter()
.map(|(request, flags)| { .map(|(request, flags)| {
let request = CallRequest::into(request); let request = CallRequest::into(request);
let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?; let signed = fake_sign::sign_call(&self.client, &self.miner, request, meta.is_dapp())?;
Ok((signed, to_call_analytics(flags))) Ok((signed, to_call_analytics(flags)))
}) })
.collect::<Result<Vec<_>, Error>>()); .collect::<Result<Vec<_>, Error>>()?;
let res = self.client.call_many(&requests, block.into()) self.client.call_many(&requests, block.into())
.map(|results| results.into_iter().map(TraceResults::from).collect()) .map(|results| results.into_iter().map(TraceResults::from).collect())
.map_err(errors::call); .map_err(errors::call)
futures::done(res).boxed()
} }
fn raw_transaction(&self, raw_transaction: Bytes, flags: TraceOptions, block: Trailing<BlockNumber>) -> Result<TraceResults, Error> { fn raw_transaction(&self, raw_transaction: Bytes, flags: TraceOptions, block: Trailing<BlockNumber>) -> Result<TraceResults, Error> {

View File

@ -16,7 +16,7 @@
//! Web3 rpc implementation. //! Web3 rpc implementation.
use hash::keccak; use hash::keccak;
use jsonrpc_core::*; use jsonrpc_core::Error;
use util::version; use util::version;
use v1::traits::Web3; use v1::traits::Web3;
use v1::types::{H256, Bytes}; use v1::types::{H256, Bytes};

View File

@ -20,7 +20,6 @@ use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{self, AtomicUsize}; use std::sync::atomic::{self, AtomicUsize};
use std::time; use std::time;
use futures::Future;
use futures_cpupool as pool; use futures_cpupool as pool;
use jsonrpc_core as rpc; use jsonrpc_core as rpc;
use order_stat; use order_stat;
@ -222,15 +221,23 @@ impl<M: rpc::Metadata, T: ActivityNotifier> rpc::Middleware<M> for Middleware<T>
self.notifier.active(); self.notifier.active();
self.stats.count_request(); self.stats.count_request();
let id = match request {
rpc::Request::Single(rpc::Call::MethodCall(ref call)) => Some(call.id.clone()),
_ => None,
};
let stats = self.stats.clone(); let stats = self.stats.clone();
let future = process(request, meta).map(move |res| { let future = process(request, meta).map(move |res| {
stats.add_roundtrip(Self::as_micro(start.elapsed())); let time = Self::as_micro(start.elapsed());
if time > 10_000 {
debug!(target: "rpc", "[{:?}] Took {}ms", id, time / 1_000);
}
stats.add_roundtrip(time);
res res
}); });
match self.pool { match self.pool {
Some(ref pool) => A(pool.spawn(future)), Some(ref pool) => A(pool.spawn(future)),
None => B(future.boxed()), None => B(Box::new(future)),
} }
} }
} }

View File

@ -24,7 +24,7 @@ macro_rules! try_bf {
($res: expr) => { ($res: expr) => {
match $res { match $res {
Ok(val) => val, Ok(val) => val,
Err(e) => return ::futures::future::err(e.into()).boxed(), Err(e) => return Box::new(::jsonrpc_core::futures::future::err(e.into())),
} }
} }
} }

View File

@ -17,7 +17,7 @@
//! Test implementation of fetch client. //! Test implementation of fetch client.
use std::{io, thread}; use std::{io, thread};
use futures::{self, Future}; use jsonrpc_core::futures::{self, Future};
use fetch::{self, Fetch}; use fetch::{self, Fetch};
/// Test implementation of fetcher. Will always return the same file. /// Test implementation of fetcher. Will always return the same file.
@ -25,7 +25,7 @@ use fetch::{self, Fetch};
pub struct TestFetch; pub struct TestFetch;
impl Fetch for TestFetch { impl Fetch for TestFetch {
type Result = futures::BoxFuture<fetch::Response, fetch::Error>; type Result = Box<Future<Item = fetch::Response, Error = fetch::Error> + Send + 'static>;
fn new() -> Result<Self, fetch::Error> where Self: Sized { fn new() -> Result<Self, fetch::Error> where Self: Sized {
Ok(TestFetch) Ok(TestFetch)
@ -38,6 +38,6 @@ impl Fetch for TestFetch {
tx.send(fetch::Response::from_reader(cursor)).unwrap(); tx.send(fetch::Response::from_reader(cursor)).unwrap();
}); });
rx.map_err(|_| fetch::Error::Aborted).boxed() Box::new(rx.map_err(|_| fetch::Error::Aborted))
} }
} }

View File

@ -20,6 +20,7 @@ use std::time::Duration;
use rlp; use rlp;
use jsonrpc_core::{IoHandler, Success}; use jsonrpc_core::{IoHandler, Success};
use jsonrpc_core::futures::Future;
use v1::impls::SigningQueueClient; use v1::impls::SigningQueueClient;
use v1::metadata::Metadata; use v1::metadata::Metadata;
use v1::traits::{EthSigning, ParitySigning, Parity}; use v1::traits::{EthSigning, ParitySigning, Parity};
@ -36,7 +37,6 @@ use ethcore::account_provider::AccountProvider;
use ethcore::client::TestBlockChainClient; use ethcore::client::TestBlockChainClient;
use ethcore::transaction::{Transaction, Action, SignedTransaction}; use ethcore::transaction::{Transaction, Action, SignedTransaction};
use ethstore::ethkey::{Generator, Random}; use ethstore::ethkey::{Generator, Random};
use futures::Future;
use serde_json; use serde_json;
struct SigningTester { struct SigningTester {

View File

@ -15,11 +15,9 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Eth rpc interface. //! Eth rpc interface.
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use futures::BoxFuture;
use v1::types::{RichBlock, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index}; use v1::types::{RichBlock, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index};
use v1::types::{Log, Receipt, SyncStatus, Transaction, Work}; use v1::types::{Log, Receipt, SyncStatus, Transaction, Work};
use v1::types::{H64, H160, H256, U256}; use v1::types::{H64, H160, H256, U256};
@ -43,7 +41,7 @@ build_rpc_trait! {
/// Returns block author. /// Returns block author.
#[rpc(meta, name = "eth_coinbase")] #[rpc(meta, name = "eth_coinbase")]
fn author(&self, Self::Metadata) -> BoxFuture<H160, Error>; fn author(&self, Self::Metadata) -> Result<H160, Error>;
/// Returns true if client is actively mining new blocks. /// Returns true if client is actively mining new blocks.
#[rpc(name = "eth_mining")] #[rpc(name = "eth_mining")]
@ -55,50 +53,50 @@ build_rpc_trait! {
/// Returns accounts list. /// Returns accounts list.
#[rpc(meta, name = "eth_accounts")] #[rpc(meta, name = "eth_accounts")]
fn accounts(&self, Self::Metadata) -> BoxFuture<Vec<H160>, Error>; fn accounts(&self, Self::Metadata) -> Result<Vec<H160>, Error>;
/// Returns highest block number. /// Returns highest block number.
#[rpc(name = "eth_blockNumber")] #[rpc(name = "eth_blockNumber")]
fn block_number(&self) -> Result<U256, Error>; fn block_number(&self) -> Result<U256, Error>;
/// Returns balance of the given account. /// Returns balance of the given account.
#[rpc(async, name = "eth_getBalance")] #[rpc(name = "eth_getBalance")]
fn balance(&self, H160, Trailing<BlockNumber>) -> BoxFuture<U256, Error>; fn balance(&self, H160, Trailing<BlockNumber>) -> BoxFuture<U256, Error>;
/// Returns content of the storage at given address. /// Returns content of the storage at given address.
#[rpc(async, name = "eth_getStorageAt")] #[rpc(name = "eth_getStorageAt")]
fn storage_at(&self, H160, U256, Trailing<BlockNumber>) -> BoxFuture<H256, Error>; fn storage_at(&self, H160, U256, Trailing<BlockNumber>) -> BoxFuture<H256, Error>;
/// Returns block with given hash. /// Returns block with given hash.
#[rpc(async, name = "eth_getBlockByHash")] #[rpc(name = "eth_getBlockByHash")]
fn block_by_hash(&self, H256, bool) -> BoxFuture<Option<RichBlock>, Error>; fn block_by_hash(&self, H256, bool) -> BoxFuture<Option<RichBlock>, Error>;
/// Returns block with given number. /// Returns block with given number.
#[rpc(async, name = "eth_getBlockByNumber")] #[rpc(name = "eth_getBlockByNumber")]
fn block_by_number(&self, BlockNumber, bool) -> BoxFuture<Option<RichBlock>, Error>; fn block_by_number(&self, BlockNumber, bool) -> BoxFuture<Option<RichBlock>, Error>;
/// Returns the number of transactions sent from given address at given time (block number). /// Returns the number of transactions sent from given address at given time (block number).
#[rpc(async, name = "eth_getTransactionCount")] #[rpc(name = "eth_getTransactionCount")]
fn transaction_count(&self, H160, Trailing<BlockNumber>) -> BoxFuture<U256, Error>; fn transaction_count(&self, H160, Trailing<BlockNumber>) -> BoxFuture<U256, Error>;
/// Returns the number of transactions in a block with given hash. /// Returns the number of transactions in a block with given hash.
#[rpc(async, name = "eth_getBlockTransactionCountByHash")] #[rpc(name = "eth_getBlockTransactionCountByHash")]
fn block_transaction_count_by_hash(&self, H256) -> BoxFuture<Option<U256>, Error>; fn block_transaction_count_by_hash(&self, H256) -> BoxFuture<Option<U256>, Error>;
/// Returns the number of transactions in a block with given block number. /// Returns the number of transactions in a block with given block number.
#[rpc(async, name = "eth_getBlockTransactionCountByNumber")] #[rpc(name = "eth_getBlockTransactionCountByNumber")]
fn block_transaction_count_by_number(&self, BlockNumber) -> BoxFuture<Option<U256>, Error>; fn block_transaction_count_by_number(&self, BlockNumber) -> BoxFuture<Option<U256>, Error>;
/// Returns the number of uncles in a block with given hash. /// Returns the number of uncles in a block with given hash.
#[rpc(async, name = "eth_getUncleCountByBlockHash")] #[rpc(name = "eth_getUncleCountByBlockHash")]
fn block_uncles_count_by_hash(&self, H256) -> BoxFuture<Option<U256>, Error>; fn block_uncles_count_by_hash(&self, H256) -> BoxFuture<Option<U256>, Error>;
/// Returns the number of uncles in a block with given block number. /// Returns the number of uncles in a block with given block number.
#[rpc(async, name = "eth_getUncleCountByBlockNumber")] #[rpc(name = "eth_getUncleCountByBlockNumber")]
fn block_uncles_count_by_number(&self, BlockNumber) -> BoxFuture<Option<U256>, Error>; fn block_uncles_count_by_number(&self, BlockNumber) -> BoxFuture<Option<U256>, Error>;
/// Returns the code at given address at given time (block number). /// Returns the code at given address at given time (block number).
#[rpc(async, name = "eth_getCode")] #[rpc(name = "eth_getCode")]
fn code_at(&self, H160, Trailing<BlockNumber>) -> BoxFuture<Bytes, Error>; fn code_at(&self, H160, Trailing<BlockNumber>) -> BoxFuture<Bytes, Error>;
/// Sends signed transaction, returning its hash. /// Sends signed transaction, returning its hash.
@ -162,7 +160,7 @@ build_rpc_trait! {
fn compile_serpent(&self, String) -> Result<Bytes, Error>; fn compile_serpent(&self, String) -> Result<Bytes, Error>;
/// Returns logs matching given filter object. /// Returns logs matching given filter object.
#[rpc(async, name = "eth_getLogs")] #[rpc(name = "eth_getLogs")]
fn logs(&self, Filter) -> BoxFuture<Vec<Log>, Error>; fn logs(&self, Filter) -> BoxFuture<Vec<Log>, Error>;
/// Returns the hash of the current block, the seedHash, and the boundary condition to be met. /// Returns the hash of the current block, the seedHash, and the boundary condition to be met.
@ -196,11 +194,11 @@ build_rpc_trait! {
fn new_pending_transaction_filter(&self) -> Result<U256, Error>; fn new_pending_transaction_filter(&self) -> Result<U256, Error>;
/// Returns filter changes since last poll. /// Returns filter changes since last poll.
#[rpc(async, name = "eth_getFilterChanges")] #[rpc(name = "eth_getFilterChanges")]
fn filter_changes(&self, Index) -> BoxFuture<FilterChanges, Error>; fn filter_changes(&self, Index) -> BoxFuture<FilterChanges, Error>;
/// Returns all logs matching given filter (in a range 'from' - 'to'). /// Returns all logs matching given filter (in a range 'from' - 'to').
#[rpc(async, name = "eth_getFilterLogs")] #[rpc(name = "eth_getFilterLogs")]
fn filter_logs(&self, Index) -> BoxFuture<Vec<Log>, Error>; fn filter_logs(&self, Index) -> BoxFuture<Vec<Log>, Error>;
/// Uninstalls filter. /// Uninstalls filter.

View File

@ -20,7 +20,6 @@ use jsonrpc_core::Error;
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use jsonrpc_macros::pubsub::Subscriber; use jsonrpc_macros::pubsub::Subscriber;
use jsonrpc_pubsub::SubscriptionId; use jsonrpc_pubsub::SubscriptionId;
use futures::BoxFuture;
use v1::types::pubsub; use v1::types::pubsub;
@ -36,7 +35,7 @@ build_rpc_trait! {
/// Unsubscribe from existing Eth subscription. /// Unsubscribe from existing Eth subscription.
#[rpc(name = "eth_unsubscribe")] #[rpc(name = "eth_unsubscribe")]
fn unsubscribe(&self, SubscriptionId) -> BoxFuture<bool, Error>; fn unsubscribe(&self, SubscriptionId) -> Result<bool, Error>;
} }
} }
} }

View File

@ -16,8 +16,7 @@
//! Eth rpc interface. //! Eth rpc interface.
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use futures::BoxFuture;
use v1::types::{Bytes, H160, H256, H520, TransactionRequest, RichRawTransaction}; use v1::types::{Bytes, H160, H256, H520, TransactionRequest, RichRawTransaction};

View File

@ -18,9 +18,8 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use jsonrpc_macros::Trailing; use jsonrpc_macros::Trailing;
use futures::BoxFuture;
use node_health::Health; use node_health::Health;
use v1::types::{ use v1::types::{
@ -51,7 +50,7 @@ build_rpc_trait! {
/// Returns default account for dapp. /// Returns default account for dapp.
#[rpc(meta, name = "parity_defaultAccount")] #[rpc(meta, name = "parity_defaultAccount")]
fn default_account(&self, Self::Metadata) -> BoxFuture<H160, Error>; fn default_account(&self, Self::Metadata) -> Result<H160, Error>;
/// Returns current transactions limit. /// Returns current transactions limit.
#[rpc(name = "parity_transactionsLimit")] #[rpc(name = "parity_transactionsLimit")]
@ -106,7 +105,7 @@ build_rpc_trait! {
fn default_extra_data(&self) -> Result<Bytes, Error>; fn default_extra_data(&self) -> Result<Bytes, Error>;
/// Returns distribution of gas price in latest blocks. /// Returns distribution of gas price in latest blocks.
#[rpc(async, name = "parity_gasPriceHistogram")] #[rpc(name = "parity_gasPriceHistogram")]
fn gas_price_histogram(&self) -> BoxFuture<Histogram, Error>; fn gas_price_histogram(&self) -> BoxFuture<Histogram, Error>;
/// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled)
@ -165,7 +164,7 @@ build_rpc_trait! {
fn ws_url(&self) -> Result<String, Error>; fn ws_url(&self) -> Result<String, Error>;
/// Returns next nonce for particular sender. Should include all transactions in the queue. /// Returns next nonce for particular sender. Should include all transactions in the queue.
#[rpc(async, name = "parity_nextNonce")] #[rpc(name = "parity_nextNonce")]
fn next_nonce(&self, H160) -> BoxFuture<U256, Error>; fn next_nonce(&self, H160) -> BoxFuture<U256, Error>;
/// Get the mode. Returns one of: "active", "passive", "dark", "offline". /// Get the mode. Returns one of: "active", "passive", "dark", "offline".
@ -208,7 +207,7 @@ build_rpc_trait! {
/// Get block header. /// Get block header.
/// Same as `eth_getBlockByNumber` but without uncles and transactions. /// Same as `eth_getBlockByNumber` but without uncles and transactions.
#[rpc(async, name = "parity_getBlockHeaderByNumber")] #[rpc(name = "parity_getBlockHeaderByNumber")]
fn block_header(&self, Trailing<BlockNumber>) -> BoxFuture<RichHeader, Error>; fn block_header(&self, Trailing<BlockNumber>) -> BoxFuture<RichHeader, Error>;
/// Get IPFS CIDv0 given protobuf encoded bytes. /// Get IPFS CIDv0 given protobuf encoded bytes.
@ -217,10 +216,10 @@ build_rpc_trait! {
/// Call contract, returning the output data. /// Call contract, returning the output data.
#[rpc(meta, name = "parity_call")] #[rpc(meta, name = "parity_call")]
fn call(&self, Self::Metadata, Vec<CallRequest>, Trailing<BlockNumber>) -> BoxFuture<Vec<Bytes>, Error>; fn call(&self, Self::Metadata, Vec<CallRequest>, Trailing<BlockNumber>) -> Result<Vec<Bytes>, Error>;
/// Returns node's health report. /// Returns node's health report.
#[rpc(async, name = "parity_nodeHealth")] #[rpc(name = "parity_nodeHealth")]
fn node_health(&self) -> BoxFuture<Health, Error>; fn node_health(&self) -> BoxFuture<Health, Error>;
} }
} }

View File

@ -16,8 +16,7 @@
//! Parity-specific rpc interface for operations altering the settings. //! Parity-specific rpc interface for operations altering the settings.
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use futures::BoxFuture;
use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp}; use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp};
@ -93,7 +92,7 @@ build_rpc_trait! {
fn set_spec_name(&self, String) -> Result<bool, Error>; fn set_spec_name(&self, String) -> Result<bool, Error>;
/// Hash a file content under given URL. /// Hash a file content under given URL.
#[rpc(async, name = "parity_hashContent")] #[rpc(name = "parity_hashContent")]
fn hash_content(&self, String) -> BoxFuture<H256, Error>; fn hash_content(&self, String) -> BoxFuture<H256, Error>;
/// Returns true if refresh successful, error if unsuccessful or server is disabled. /// Returns true if refresh successful, error if unsuccessful or server is disabled.

View File

@ -15,8 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! ParitySigning rpc interface. //! ParitySigning rpc interface.
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use futures::BoxFuture;
use v1::types::{U256, H160, Bytes, ConfirmationResponse, TransactionRequest, Either}; use v1::types::{U256, H160, Bytes, ConfirmationResponse, TransactionRequest, Either};

View File

@ -15,9 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Personal rpc interface. //! Personal rpc interface.
use jsonrpc_core::Error; use jsonrpc_core::{BoxFuture, Error};
use futures::BoxFuture;
use v1::types::{U128, H160, H256, TransactionRequest}; use v1::types::{U128, H160, H256, TransactionRequest};

Some files were not shown because too many files have changed in this diff Show More