Merge branch 'master' into h256

Conflicts:
	ethcore/src/account.rs
Tomasz Drwięga 2016-04-09 11:27:19 +02:00
commit 04d5b5cbe6
69 changed files with 1449 additions and 312 deletions

Cargo.lock (generated)

@ -2,7 +2,7 @@
name = "parity" name = "parity"
version = "1.1.0" version = "1.1.0"
dependencies = [ dependencies = [
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)",
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)",
@ -11,6 +11,7 @@ dependencies = [
"ethcore-devtools 1.1.0", "ethcore-devtools 1.1.0",
"ethcore-rpc 1.1.0", "ethcore-rpc 1.1.0",
"ethcore-util 1.1.0", "ethcore-util 1.1.0",
"ethcore-webapp 1.1.0",
"ethminer 1.1.0", "ethminer 1.1.0",
"ethsync 1.1.0", "ethsync 1.1.0",
"fdlimit 0.1.0", "fdlimit 0.1.0",
@ -97,15 +98,24 @@ dependencies = [
[[package]] [[package]]
name = "clippy" name = "clippy"
version = "0.0.54" version = "0.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "conduit-mime-types"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "cookie" name = "cookie"
version = "0.1.21" version = "0.1.21"
@ -184,6 +194,15 @@ dependencies = [
"regex 0.1.61 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.1.61 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "error"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "eth-secp256k1" name = "eth-secp256k1"
version = "0.5.4" version = "0.5.4"
@ -211,7 +230,7 @@ dependencies = [
name = "ethcore" name = "ethcore"
version = "1.1.0" version = "1.1.0"
dependencies = [ dependencies = [
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.1.0", "ethash 1.1.0",
@ -238,14 +257,14 @@ dependencies = [
name = "ethcore-rpc" name = "ethcore-rpc"
version = "1.1.0" version = "1.1.0"
dependencies = [ dependencies = [
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.1.0", "ethash 1.1.0",
"ethcore 1.1.0", "ethcore 1.1.0",
"ethcore-util 1.1.0", "ethcore-util 1.1.0",
"ethminer 1.1.0", "ethminer 1.1.0",
"ethsync 1.1.0", "ethsync 1.1.0",
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 3.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 5.0.0 (git+https://github.com/debris/jsonrpc-http-server.git)",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -262,7 +281,7 @@ dependencies = [
"arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"bigint 0.1.0", "bigint 0.1.0",
"chrono 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -290,6 +309,23 @@ dependencies = [
"vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "ethcore-webapp"
version = "1.1.0"
dependencies = [
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-rpc 1.1.0",
"ethcore-util 1.1.0",
"hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"iron 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 4.0.0 (git+https://github.com/tomusdrw/jsonrpc-http-server.git?branch=old-hyper)",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-status 0.1.4 (git+https://github.com/tomusdrw/parity-status.git)",
"parity-wallet 0.1.0 (git+https://github.com/tomusdrw/parity-wallet.git)",
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
]
[[package]] [[package]]
name = "ethjson" name = "ethjson"
version = "0.1.0" version = "0.1.0"
@ -306,7 +342,7 @@ dependencies = [
name = "ethminer" name = "ethminer"
version = "1.1.0" version = "1.1.0"
dependencies = [ dependencies = [
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 1.1.0", "ethcore 1.1.0",
"ethcore-util 1.1.0", "ethcore-util 1.1.0",
@ -320,7 +356,7 @@ dependencies = [
name = "ethsync" name = "ethsync"
version = "1.1.0" version = "1.1.0"
dependencies = [ dependencies = [
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 1.1.0", "ethcore 1.1.0",
"ethcore-util 1.1.0", "ethcore-util 1.1.0",
@ -414,6 +450,27 @@ dependencies = [
"url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "hyper"
version = "0.9.0-mio"
source = "git+https://github.com/hyperium/hyper?branch=mio#d55a70dc56dac1f0f03bc4c3a83db0314d48e69a"
dependencies = [
"cookie 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"mime 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rotor 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
"traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
"vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "igd" name = "igd"
version = "0.4.2" version = "0.4.2"
@ -426,6 +483,23 @@ dependencies = [
"xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "iron"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"conduit-mime-types 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"error 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"modifier 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"plugin 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "itertools" name = "itertools"
version = "0.4.11" version = "0.4.11"
@ -452,14 +526,24 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-http-server" name = "jsonrpc-http-server"
version = "3.0.1" version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/tomusdrw/jsonrpc-http-server.git?branch=old-hyper#46bd4e7cf8352e0efc940cf76d3dff99f1a3da15"
dependencies = [ dependencies = [
"hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "jsonrpc-http-server"
version = "5.0.0"
source = "git+https://github.com/debris/jsonrpc-http-server.git#76fa443982b40665721fe6b1ece42fc0a53be996"
dependencies = [
"hyper 0.9.0-mio (git+https://github.com/hyperium/hyper?branch=mio)",
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "kernel32-sys" name = "kernel32-sys"
version = "0.2.1" version = "0.2.1"
@ -572,6 +656,11 @@ dependencies = [
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "modifier"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "net2" name = "net2"
version = "0.2.23" version = "0.2.23"
@ -636,6 +725,35 @@ name = "odds"
version = "0.2.12" version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "parity-status"
version = "0.1.4"
source = "git+https://github.com/tomusdrw/parity-status.git#380d13c8aafc3847a731968a6532edb09c78f2cf"
dependencies = [
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
]
[[package]]
name = "parity-wallet"
version = "0.1.0"
source = "git+https://github.com/tomusdrw/parity-wallet.git#9b0253f5cb88b31417450ca8be708cab2e437dfc"
dependencies = [
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
]
[[package]]
name = "parity-webapp"
version = "0.1.0"
source = "git+https://github.com/tomusdrw/parity-webapp.git#a24297256bae0ae0712c6478cd1ad681828b3800"
[[package]]
name = "plugin"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "primal" name = "primal"
version = "0.2.3" version = "0.2.3"
@ -695,6 +813,16 @@ dependencies = [
"syntex_syntax 0.30.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex_syntax 0.30.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "quick-error"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "quine-mc_cluskey"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "rand" name = "rand"
version = "0.3.14" version = "0.3.14"
@ -739,6 +867,18 @@ dependencies = [
"librocksdb-sys 0.2.3 (git+https://github.com/arkpar/rust-rocksdb.git)", "librocksdb-sys 0.2.3 (git+https://github.com/arkpar/rust-rocksdb.git)",
] ]
[[package]]
name = "rotor"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
"void 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "rpassword" name = "rpassword"
version = "0.1.3" version = "0.1.3"
@ -932,6 +1072,14 @@ name = "typeable"
version = "0.1.2" version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "typemap"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"unsafe-any 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "unicase" name = "unicase"
version = "1.4.0" version = "1.4.0"
@ -958,6 +1106,14 @@ name = "unicode-xid"
version = "0.0.3" version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unsafe-any"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "url" name = "url"
version = "0.2.38" version = "0.2.38"
@ -994,6 +1150,15 @@ dependencies = [
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "vecio"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "vergen" name = "vergen"
version = "0.1.0" version = "0.1.0"
@ -1003,6 +1168,11 @@ dependencies = [
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "void"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "winapi" name = "winapi"
version = "0.2.6" version = "0.2.6"


@ -21,22 +21,26 @@ daemonize = "0.2"
num_cpus = "0.2" num_cpus = "0.2"
number_prefix = "0.2" number_prefix = "0.2"
rpassword = "0.1" rpassword = "0.1"
-clippy = { version = "0.0.54", optional = true }
+clippy = { version = "0.0.61", optional = true}
ethcore = { path = "ethcore" } ethcore = { path = "ethcore" }
ethcore-util = { path = "util" } ethcore-util = { path = "util" }
ethsync = { path = "sync" } ethsync = { path = "sync" }
ethminer = { path = "miner" } ethminer = { path = "miner" }
ethcore-devtools = { path = "devtools" } ethcore-devtools = { path = "devtools" }
ethcore-rpc = { path = "rpc", optional = true } ethcore-rpc = { path = "rpc", optional = true }
ethcore-webapp = { path = "webapp", optional = true }
[dependencies.hyper] [dependencies.hyper]
version = "0.8" version = "0.8"
default-features = false default-features = false
[features] [features]
default = ["rpc"] default = ["rpc", "webapp"]
rpc = ["ethcore-rpc"] rpc = ["ethcore-rpc"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"] webapp = ["ethcore-webapp"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev",
"ethcore-webapp/dev"]
travis-beta = ["ethcore/json-tests"] travis-beta = ["ethcore/json-tests"]
travis-nightly = ["ethcore/json-tests", "dev"] travis-nightly = ["ethcore/json-tests", "dev"]

cov.sh

@ -23,6 +23,7 @@ cargo test \
-p ethcore-rpc \ -p ethcore-rpc \
-p parity \ -p parity \
-p ethminer \ -p ethminer \
-p ethcore-webapp \
--no-run || exit $? --no-run || exit $?
rm -rf target/coverage rm -rf target/coverage
mkdir -p target/coverage mkdir -p target/coverage
@ -33,5 +34,6 @@ kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage t
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethsync-* kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethsync-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_webapp-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethminer-* kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethminer-*
xdg-open target/coverage/index.html xdg-open target/coverage/index.html

doc.sh

@ -7,5 +7,6 @@ cargo doc --no-deps --verbose \
-p ethcore \ -p ethcore \
-p ethsync \ -p ethsync \
-p ethcore-rpc \ -p ethcore-rpc \
-p ethcore-webapp \
-p parity \ -p parity \
-p ethminer -p ethminer


@ -17,7 +17,7 @@ ethcore-util = { path = "../util" }
evmjit = { path = "../evmjit", optional = true } evmjit = { path = "../evmjit", optional = true }
ethash = { path = "../ethash" } ethash = { path = "../ethash" }
num_cpus = "0.2" num_cpus = "0.2"
-clippy = { version = "0.0.54", optional = true }
+clippy = { version = "0.0.61", optional = true}
crossbeam = "0.1.5" crossbeam = "0.1.5"
lazy_static = "0.1" lazy_static = "0.1"
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }


@ -154,7 +154,7 @@ impl ExecutedBlock {
} }
} }
-/// Trait for a object that is_a `ExecutedBlock`.
+/// Trait for a object that is a `ExecutedBlock`.
pub trait IsBlock { pub trait IsBlock {
/// Get the block associated with this object. /// Get the block associated with this object.
fn block(&self) -> &ExecutedBlock; fn block(&self) -> &ExecutedBlock;
@ -192,7 +192,7 @@ pub struct OpenBlock<'x> {
last_hashes: LastHashes, last_hashes: LastHashes,
} }
-/// Just like OpenBlock, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
+/// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
/// and collected the uncles. /// and collected the uncles.
/// ///
/// There is no function available to push a transaction. /// There is no function available to push a transaction.
@ -204,7 +204,7 @@ pub struct ClosedBlock {
unclosed_state: State, unclosed_state: State,
} }
-/// Just like ClosedBlock except that we can't reopen it and it's faster.
+/// Just like `ClosedBlock` except that we can't reopen it and it's faster.
/// ///
/// We actually store the post-`Engine::on_close_block` state, unlike in `ClosedBlock` where it's the pre. /// We actually store the post-`Engine::on_close_block` state, unlike in `ClosedBlock` where it's the pre.
#[derive(Clone)] #[derive(Clone)]
@ -216,14 +216,15 @@ pub struct LockedBlock {
/// A block that has a valid seal. /// A block that has a valid seal.
/// ///
-/// The block's header has valid seal arguments. The block cannot be reversed into a ClosedBlock or OpenBlock.
+/// The block's header has valid seal arguments. The block cannot be reversed into a `ClosedBlock` or `OpenBlock`.
pub struct SealedBlock { pub struct SealedBlock {
block: ExecutedBlock, block: ExecutedBlock,
uncle_bytes: Bytes, uncle_bytes: Bytes,
} }
impl<'x> OpenBlock<'x> { impl<'x> OpenBlock<'x> {
-/// Create a new OpenBlock ready for transaction pushing.
+#[cfg_attr(feature="dev", allow(too_many_arguments))]
+/// Create a new `OpenBlock` ready for transaction pushing.
pub fn new(engine: &'x Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self { pub fn new(engine: &'x Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self {
let mut r = OpenBlock { let mut r = OpenBlock {
block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()), tracing), block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()), tracing),
@ -319,7 +320,7 @@ impl<'x> OpenBlock<'x> {
} }
} }
-/// Turn this into a `ClosedBlock`. A BlockChain must be provided in order to figure out the uncles.
+/// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles.
pub fn close(self) -> ClosedBlock { pub fn close(self) -> ClosedBlock {
let mut s = self; let mut s = self;
@ -454,6 +455,7 @@ impl IsBlock for SealedBlock {
} }
/// Enact the block given by block header, transactions and uncles /// Enact the block given by block header, transactions and uncles
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<LockedBlock, Error> { pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<LockedBlock, Error> {
{ {
if ::log::max_log_level() >= ::log::LogLevel::Trace { if ::log::max_log_level() >= ::log::LogLevel::Trace {


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! A queue of blocks. Sits between network or other I/O and the BlockChain.
+//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
//! Sorts them ready for blockchain insertion. //! Sorts them ready for blockchain insertion.
use std::thread::{JoinHandle, self}; use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
@ -89,7 +89,7 @@ impl BlockQueueInfo {
} }
} }
-/// A queue of blocks. Sits between network or other I/O and the BlockChain.
+/// A queue of blocks. Sits between network or other I/O and the `BlockChain`.
/// Sorts them ready for blockchain insertion. /// Sorts them ready for blockchain insertion.
pub struct BlockQueue { pub struct BlockQueue {
panic_handler: Arc<PanicHandler>, panic_handler: Arc<PanicHandler>,
@ -116,6 +116,7 @@ struct VerifyingBlock {
} }
struct QueueSignal { struct QueueSignal {
deleting: Arc<AtomicBool>,
signalled: AtomicBool, signalled: AtomicBool,
message_channel: IoChannel<NetSyncMessage>, message_channel: IoChannel<NetSyncMessage>,
} }
@ -123,10 +124,16 @@ struct QueueSignal {
impl QueueSignal { impl QueueSignal {
#[cfg_attr(feature="dev", allow(bool_comparison))] #[cfg_attr(feature="dev", allow(bool_comparison))]
fn set(&self) { fn set(&self) {
// Do not signal when we are about to close
if self.deleting.load(AtomicOrdering::Relaxed) {
return;
}
if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message"); self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
} }
} }
fn reset(&self) { fn reset(&self) {
self.signalled.store(false, AtomicOrdering::Relaxed); self.signalled.store(false, AtomicOrdering::Relaxed);
} }
@ -150,8 +157,12 @@ impl BlockQueue {
bad: Mutex::new(HashSet::new()), bad: Mutex::new(HashSet::new()),
}); });
let more_to_verify = Arc::new(Condvar::new()); let more_to_verify = Arc::new(Condvar::new());
-let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
let deleting = Arc::new(AtomicBool::new(false));
+let ready_signal = Arc::new(QueueSignal {
+	deleting: deleting.clone(),
+	signalled: AtomicBool::new(false),
+	message_channel: message_channel
+});
let empty = Arc::new(Condvar::new()); let empty = Arc::new(Condvar::new());
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
@ -431,12 +442,14 @@ impl MayPanic for BlockQueue {
impl Drop for BlockQueue { impl Drop for BlockQueue {
fn drop(&mut self) { fn drop(&mut self) {
trace!(target: "shutdown", "[BlockQueue] Closing...");
self.clear(); self.clear();
self.deleting.store(true, AtomicOrdering::Release); self.deleting.store(true, AtomicOrdering::Release);
self.more_to_verify.notify_all(); self.more_to_verify.notify_all();
for t in self.verifiers.drain(..) { for t in self.verifiers.drain(..) {
t.join().unwrap(); t.join().unwrap();
} }
trace!(target: "shutdown", "[BlockQueue] Closed.");
} }
} }


@ -427,6 +427,7 @@ impl BlockChain {
} }
} }
#[cfg_attr(feature="dev", allow(similar_names))]
/// Inserts the block into backing cache database. /// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified. /// Expects the block to be valid and already verified.
/// If the block is already known, does nothing. /// If the block is already known, does nothing.
@ -855,6 +856,7 @@ impl BlockChain {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#![cfg_attr(feature="dev", allow(similar_names))]
use std::str::FromStr; use std::str::FromStr;
use rustc_serialize::hex::FromHex; use rustc_serialize::hex::FromHex;
use util::hash::*; use util::hash::*;


@ -87,6 +87,7 @@ impl Indexer {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#![cfg_attr(feature="dev", allow(similar_names))]
use chainfilter::BloomIndex; use chainfilter::BloomIndex;
use chainfilter::indexer::Indexer; use chainfilter::indexer::Indexer;


@ -23,7 +23,7 @@ use chainfilter::{BloomIndex, FilterDataSource, ChainFilter};
/// In memory cache for blooms. /// In memory cache for blooms.
/// ///
-/// Stores all blooms in HashMap, which indexes them by `BloomIndex`.
+/// Stores all blooms in `HashMap`, which indexes them by `BloomIndex`.
pub struct MemoryCache { pub struct MemoryCache {
blooms: HashMap<BloomIndex, H2048>, blooms: HashMap<BloomIndex, H2048>,
} }


@ -38,7 +38,7 @@ use block_queue::{BlockQueue, BlockQueueInfo};
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{BlockId, TransactionId, UncleId, ClientConfig, BlockChainClient}; use client::{BlockId, TransactionId, UncleId, ClientConfig, BlockChainClient};
use env_info::EnvInfo; use env_info::EnvInfo;
-use executive::{Executive, Executed, contract_address};
+use executive::{Executive, Executed, TransactOptions, contract_address};
use receipt::LocalizedReceipt; use receipt::LocalizedReceipt;
pub use blockchain::CacheSize as BlockChainCacheSize; pub use blockchain::CacheSize as BlockChainCacheSize;
@ -418,7 +418,8 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
// give the sender max balance // give the sender max balance
state.sub_balance(&sender, &balance); state.sub_balance(&sender, &balance);
state.add_balance(&sender, &U256::max_value()); state.add_balance(&sender, &U256::max_value());
Executive::new(&mut state, &env_info, self.engine.deref().deref()).transact(t, false) let options = TransactOptions { tracing: false, check_nonce: false };
Executive::new(&mut state, &env_info, self.engine.deref().deref()).transact(t, options)
} }
// TODO [todr] Should be moved to miner crate eventually. // TODO [todr] Should be moved to miner crate eventually.


@ -65,7 +65,7 @@ pub enum Error {
/// Evm result. /// Evm result.
/// ///
-/// Returns gas_left if execution is successful, otherwise error.
+/// Returns `gas_left` if execution is successful, otherwise error.
pub type Result = result::Result<U256, Error>; pub type Result = result::Result<U256, Error>;
/// Evm interface. /// Evm interface.


@ -36,6 +36,14 @@ pub fn contract_address(address: &Address, nonce: &U256) -> Address {
From::from(stream.out().sha3()) From::from(stream.out().sha3())
} }
/// Transaction execution options.
pub struct TransactOptions {
/// Enable call tracing.
pub tracing: bool,
/// Check transaction nonce before execution.
pub check_nonce: bool,
}
/// Transaction execution receipt. /// Transaction execution receipt.
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Executed { pub struct Executed {
@ -110,7 +118,7 @@ impl<'a> Executive<'a> {
} }
/// This funtion should be used to execute transaction. /// This funtion should be used to execute transaction.
-pub fn transact(&'a mut self, t: &SignedTransaction, tracing: bool) -> Result<Executed, Error> {
+pub fn transact(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result<Executed, Error> {
let sender = try!(t.sender()); let sender = try!(t.sender());
let nonce = self.state.nonce(&sender); let nonce = self.state.nonce(&sender);
@ -124,9 +132,11 @@ impl<'a> Executive<'a> {
let init_gas = t.gas - base_gas_required; let init_gas = t.gas - base_gas_required;
// validate transaction nonce // validate transaction nonce
if options.check_nonce {
if t.nonce != nonce { if t.nonce != nonce {
return Err(From::from(ExecutionError::InvalidNonce { expected: nonce, got: t.nonce })); return Err(From::from(ExecutionError::InvalidNonce { expected: nonce, got: t.nonce }));
} }
}
// validate if transaction fits into given block // validate if transaction fits into given block
if self.info.gas_used + t.gas > self.info.gas_limit { if self.info.gas_used + t.gas > self.info.gas_limit {
@ -151,7 +161,7 @@ impl<'a> Executive<'a> {
self.state.inc_nonce(&sender); self.state.inc_nonce(&sender);
self.state.sub_balance(&sender, &U256::from(gas_cost)); self.state.sub_balance(&sender, &U256::from(gas_cost));
-let mut substate = Substate::new(tracing);
+let mut substate = Substate::new(options.tracing);
let (gas_left, output) = match t.action { let (gas_left, output) = match t.action {
Action::Create => { Action::Create => {
@ -881,7 +891,8 @@ mod tests {
let executed = { let executed = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &engine);
ex.transact(&t, false).unwrap() let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts).unwrap()
}; };
assert_eq!(executed.gas, U256::from(100_000)); assert_eq!(executed.gas, U256::from(100_000));
@ -914,7 +925,8 @@ mod tests {
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &engine);
ex.transact(&t, false) let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
}; };
match res { match res {
@ -945,7 +957,8 @@ mod tests {
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &engine);
ex.transact(&t, false) let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
}; };
match res { match res {
@ -978,7 +991,8 @@ mod tests {
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &engine);
ex.transact(&t, false) let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
}; };
match res { match res {
@ -1011,7 +1025,8 @@ mod tests {
let res = { let res = {
let mut ex = Executive::new(&mut state, &info, &engine); let mut ex = Executive::new(&mut state, &info, &engine);
ex.transact(&t, false) let opts = TransactOptions { check_nonce: true, tracing: false };
ex.transact(&t, opts)
}; };
match res { match res {
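As a quick reference, here is a minimal, illustrative sketch of the new call shape introduced in this file. The helper name is made up, and the imports follow the crate-internal style visible in the surrounding hunks (`use common::*;`, `use executive::...`); treat it as a sketch under those assumptions, not as part of the commit:

// Sketch only: `Executive::transact` now takes a TransactOptions value
// instead of the old bare `tracing` bool.
use common::*;
use engine::Engine;
use executive::{Executive, Executed, TransactOptions};

fn apply_traced(state: &mut State, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction) -> Result<Executed, Error> {
	// Old call shape: Executive::new(state, env_info, engine).transact(t, true)
	// New: tracing and nonce checking are named explicitly; client.rs's `call`
	// passes `check_nonce: false` to skip nonce validation when simulating a call.
	let options = TransactOptions { tracing: true, check_nonce: true };
	Executive::new(state, env_info, engine).transact(t, options)
}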


@ -16,7 +16,7 @@
use common::*; use common::*;
use engine::Engine; use engine::Engine;
-use executive::Executive;
+use executive::{Executive, TransactOptions};
use account_db::*; use account_db::*;
#[cfg(test)] #[cfg(test)]
#[cfg(feature = "json-tests")] #[cfg(feature = "json-tests")]
@ -220,7 +220,8 @@ impl State {
pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
// let old = self.to_pod(); // let old = self.to_pod();
-let e = try!(Executive::new(self, env_info, engine).transact(t, tracing));
+let options = TransactOptions { tracing: tracing, check_nonce: true };
+let e = try!(Executive::new(self, env_info, engine).transact(t, options));
// TODO uncomment once to_pod() works correctly. // TODO uncomment once to_pod() works correctly.
// trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod())); // trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod()));


@ -55,7 +55,7 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res
/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block /// Still operates on a individual block
-/// Returns a PreverifiedBlock structure populated with transactions
+/// Returns a `PreverifiedBlock` structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreverifiedBlock, Error> { pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreverifiedBlock, Error> {
try!(engine.verify_block_unordered(&header, Some(&bytes))); try!(engine.verify_block_unordered(&header, Some(&bytes)));
for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) { for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
@ -331,6 +331,7 @@ mod tests {
} }
#[test] #[test]
#[cfg_attr(feature="dev", allow(similar_names))]
fn test_verify_block() { fn test_verify_block() {
// Test against morden // Test against morden
let mut good = Header::new(); let mut good = Header::new();

fmt.sh

@ -9,6 +9,7 @@ $RUSTFMT ./json/src/lib.rs
$RUSTFMT ./miner/src/lib.rs $RUSTFMT ./miner/src/lib.rs
$RUSTFMT ./parity/main.rs $RUSTFMT ./parity/main.rs
$RUSTFMT ./rpc/src/lib.rs $RUSTFMT ./rpc/src/lib.rs
$RUSTFMT ./webapp/src/lib.rs
$RUSTFMT ./sync/src/lib.rs $RUSTFMT ./sync/src/lib.rs
$RUSTFMT ./util/src/lib.rs $RUSTFMT ./util/src/lib.rs


@ -7,6 +7,6 @@ echo "set -e" >> $FILE
echo "cargo build --release --features dev" >> $FILE echo "cargo build --release --features dev" >> $FILE
# Build tests # Build tests
echo "cargo test --no-run --features dev \\" >> $FILE echo "cargo test --no-run --features dev \\" >> $FILE
echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" >> $FILE echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer -p ethcore-webapp" >> $FILE
echo "" >> $FILE echo "" >> $FILE
chmod +x $FILE chmod +x $FILE


@ -145,22 +145,6 @@ struct Dispatch {
return_type_ty: Option<P<Ty>>, return_type_ty: Option<P<Ty>>,
} }
fn implement_dispatch_arm_invoke(
cx: &ExtCtxt,
builder: &aster::AstBuilder,
dispatch: &Dispatch,
) -> P<ast::Expr>
{
let deserialize_expr = quote_expr!(cx, ::bincode::serde::deserialize_from(r, ::bincode::SizeLimit::Infinite).expect("ipc deserialization error, aborting"));
let input_type_id = builder.id(dispatch.input_type_name.clone().unwrap().as_str());
let function_name = builder.id(dispatch.function_name.as_str());
let output_type_id = builder.id(dispatch.return_type_name.clone().unwrap().as_str());
let input_args_exprs = dispatch.input_arg_names.iter().map(|ref arg_name| {
let arg_ident = builder.id(arg_name);
quote_expr!(cx, input. $arg_ident)
}).collect::<Vec<P<ast::Expr>>>();
// This is the expanded version of this: // This is the expanded version of this:
// //
// let invoke_serialize_stmt = quote_stmt!(cx, { // let invoke_serialize_stmt = quote_stmt!(cx, {
@ -169,7 +153,20 @@ fn implement_dispatch_arm_invoke(
// //
// But the above does not allow comma-separated expressions for arbitrary number // But the above does not allow comma-separated expressions for arbitrary number
// of parameters ...$hand_param_a, $hand_param_b, ... $hand_param_n // of parameters ...$hand_param_a, $hand_param_b, ... $hand_param_n
-let invoke_serialize_stmt = {
+fn implement_dispatch_arm_invoke_stmt(
cx: &ExtCtxt,
builder: &aster::AstBuilder,
dispatch: &Dispatch,
) -> ast::Stmt
{
let function_name = builder.id(dispatch.function_name.as_str());
let output_type_id = builder.id(dispatch.return_type_name.clone().unwrap().as_str());
let input_args_exprs = dispatch.input_arg_names.iter().map(|ref arg_name| {
let arg_ident = builder.id(arg_name);
quote_expr!(cx, input. $arg_ident)
}).collect::<Vec<P<ast::Expr>>>();
let ext_cx = &*cx; let ext_cx = &*cx;
::quasi::parse_stmt_panic(&mut ::syntax::parse::new_parser_from_tts( ::quasi::parse_stmt_panic(&mut ::syntax::parse::new_parser_from_tts(
ext_cx.parse_sess(), ext_cx.parse_sess(),
@ -216,8 +213,25 @@ fn implement_dispatch_arm_invoke(
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren))); tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace))); tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace)));
tt tt
-}))
+})).unwrap()
}
fn implement_dispatch_arm_invoke(
cx: &ExtCtxt,
builder: &aster::AstBuilder,
dispatch: &Dispatch,
buffer: bool,
) -> P<ast::Expr>
{
let deserialize_expr = if buffer {
quote_expr!(cx, ::bincode::serde::deserialize(buf).expect("ipc deserialization error, aborting"))
} else {
quote_expr!(cx, ::bincode::serde::deserialize_from(r, ::bincode::SizeLimit::Infinite).expect("ipc deserialization error, aborting"))
}; };
let input_type_id = builder.id(dispatch.input_type_name.clone().unwrap().as_str());
let invoke_serialize_stmt = implement_dispatch_arm_invoke_stmt(cx, builder, dispatch);
quote_expr!(cx, { quote_expr!(cx, {
let input: $input_type_id = $deserialize_expr; let input: $input_type_id = $deserialize_expr;
$invoke_serialize_stmt $invoke_serialize_stmt
@ -225,14 +239,31 @@ fn implement_dispatch_arm_invoke(
} }
/// generates dispatch match for method id /// generates dispatch match for method id
-fn implement_dispatch_arm(cx: &ExtCtxt, builder: &aster::AstBuilder, index: u32, dispatch: &Dispatch)
-	-> ast::Arm
+fn implement_dispatch_arm(
+	cx: &ExtCtxt,
+	builder: &aster::AstBuilder,
+	index: u32,
+	dispatch: &Dispatch,
+	buffer: bool,
+) -> ast::Arm
{
let index_ident = builder.id(format!("{}", index).as_str());
-let invoke_expr = implement_dispatch_arm_invoke(cx, builder, dispatch);
+let invoke_expr = implement_dispatch_arm_invoke(cx, builder, dispatch, buffer);
quote_arm!(cx, $index_ident => { $invoke_expr } )
}
fn implement_dispatch_arms(
cx: &ExtCtxt,
builder: &aster::AstBuilder,
dispatches: &[Dispatch],
buffer: bool,
) -> Vec<ast::Arm>
{
let mut index = -1;
dispatches.iter()
.map(|dispatch| { index = index + 1; implement_dispatch_arm(cx, builder, index as u32, dispatch, buffer) }).collect()
}
/// generates client type for specified server type /// generates client type for specified server type
/// for say `Service` it generates `ServiceClient` /// for say `Service` it generates `ServiceClient`
fn push_client_struct(cx: &ExtCtxt, builder: &aster::AstBuilder, item: &Item, push: &mut FnMut(Annotatable)) { fn push_client_struct(cx: &ExtCtxt, builder: &aster::AstBuilder, item: &Item, push: &mut FnMut(Annotatable)) {
@ -511,9 +542,9 @@ fn implement_interface(
dispatch_table.push(push_invoke_signature_aster(builder, &impl_item, signature, push)); dispatch_table.push(push_invoke_signature_aster(builder, &impl_item, signature, push));
} }
} }
-let mut index = -1;
-let dispatch_arms: Vec<_> = dispatch_table.iter()
-.map(|dispatch| { index = index + 1; implement_dispatch_arm(cx, builder, index as u32, dispatch) }).collect();
+let dispatch_arms = implement_dispatch_arms(cx, builder, &dispatch_table, false);
+let dispatch_arms_buffered = implement_dispatch_arms(cx, builder, &dispatch_table, true);
Ok((quote_item!(cx, Ok((quote_item!(cx,
impl $impl_generics ::ipc::IpcInterface<$ty> for $ty $where_clause { impl $impl_generics ::ipc::IpcInterface<$ty> for $ty $where_clause {
@ -531,6 +562,14 @@ fn implement_interface(
_ => vec![] _ => vec![]
} }
} }
fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8>
{
match method_num {
$dispatch_arms_buffered
_ => vec![]
}
}
} }
).unwrap(), dispatch_table)) ).unwrap(), dispatch_table))
} }

ipc/nano/Cargo.toml (new file)

@ -0,0 +1,12 @@
[package]
name = "ethcore-ipc-nano"
version = "1.1.0"
authors = ["Nikolay Volf <nikolay@ethcore.io>"]
license = "GPL-3.0"
[features]
[dependencies]
"ethcore-ipc" = { path = "../rpc" }
nanomsg = "0.5.0"
log = "0.3"

ipc/nano/src/lib.rs (new file)

@ -0,0 +1,214 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! IPC over nanomsg transport
extern crate ethcore_ipc as ipc;
extern crate nanomsg;
#[macro_use] extern crate log;
pub use ipc::*;
use std::sync::*;
use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut};
const POLL_TIMEOUT: isize = 100;
pub struct Worker<S> where S: IpcInterface<S> {
service: Arc<S>,
sockets: Vec<(Socket, Endpoint)>,
polls: Vec<PollFd>,
buf: Vec<u8>,
}
#[derive(Debug)]
pub enum SocketError {
DuplexLink
}
impl<S> Worker<S> where S: IpcInterface<S> {
pub fn new(service: Arc<S>) -> Worker<S> {
Worker::<S> {
service: service.clone(),
sockets: Vec::new(),
polls: Vec::new(),
buf: Vec::new(),
}
}
pub fn poll(&mut self) {
let mut request = PollRequest::new(&mut self.polls[..]);
let _result_guard = Socket::poll(&mut request, POLL_TIMEOUT);
for (fd_index, fd) in request.get_fds().iter().enumerate() {
if fd.can_read() {
let (ref mut socket, _) = self.sockets[fd_index];
unsafe { self.buf.set_len(0); }
match socket.nb_read_to_end(&mut self.buf) {
Ok(method_sign_len) => {
if method_sign_len >= 2 {
// method_num
let method_num = self.buf[1] as u16 * 256 + self.buf[0] as u16;
// payload
let payload = &self.buf[2..];
// dispatching for ipc interface
let result = self.service.dispatch_buf(method_num, payload);
if let Err(e) = socket.nb_write(&result) {
warn!(target: "ipc", "Failed to write response: {:?}", e);
}
}
else {
warn!(target: "ipc", "Failed to read method signature from socket: unexpected message length({})", method_sign_len);
}
},
Err(Error::TryAgain) => {
},
Err(x) => {
warn!(target: "ipc", "Error polling connections {:?}", x);
panic!();
}
}
}
}
}
fn rebuild_poll_request(&mut self) {
self.polls = self.sockets.iter()
.map(|&(ref socket, _)| socket.new_pollfd(PollInOut::In))
.collect::<Vec<PollFd>>();
}
pub fn add_duplex(&mut self, addr: &str) -> Result<(), SocketError> {
let mut socket = try!(Socket::new(Protocol::Pair).map_err(|e| {
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
SocketError::DuplexLink
}));
let endpoint = try!(socket.bind(addr).map_err(|e| {
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", addr, e);
SocketError::DuplexLink
}));
self.sockets.push((socket, endpoint));
self.rebuild_poll_request();
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::Worker;
use ipc::*;
use std::io::{Read, Write};
use std::sync::{Arc, RwLock};
use nanomsg::{Socket, Protocol, Endpoint};
struct TestInvoke {
method_num: u16,
params: Vec<u8>,
}
struct DummyService {
methods_stack: RwLock<Vec<TestInvoke>>,
}
impl DummyService {
fn new() -> DummyService {
DummyService { methods_stack: RwLock::new(Vec::new()) }
}
}
impl IpcInterface<DummyService> for DummyService {
fn dispatch<R>(&self, _r: &mut R) -> Vec<u8> where R: Read {
vec![]
}
fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8> {
self.methods_stack.write().unwrap().push(
TestInvoke {
method_num: method_num,
params: buf.to_vec(),
});
vec![]
}
}
fn dummy_write(addr: &str, buf: &[u8]) -> (Socket, Endpoint) {
let mut socket = Socket::new(Protocol::Pair).unwrap();
let endpoint = socket.connect(addr).unwrap();
//thread::sleep_ms(10);
socket.write(buf).unwrap();
(socket, endpoint)
}
#[test]
fn can_create_worker() {
let worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
assert_eq!(0, worker.sockets.len());
}
#[test]
fn can_add_duplex_socket_to_worker() {
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
worker.add_duplex("ipc:///tmp/parity-test10.ipc").unwrap();
assert_eq!(1, worker.sockets.len());
}
#[test]
fn worker_can_poll_empty() {
let service = Arc::new(DummyService::new());
let mut worker = Worker::<DummyService>::new(service.clone());
worker.add_duplex("ipc:///tmp/parity-test20.ipc").unwrap();
worker.poll();
assert_eq!(0, service.methods_stack.read().unwrap().len());
}
#[test]
fn worker_can_poll() {
let url = "ipc:///tmp/parity-test30.ipc";
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
worker.add_duplex(url).unwrap();
let (_socket, _endpoint) = dummy_write(url, &vec![0, 0, 7, 7, 6, 6]);
worker.poll();
assert_eq!(1, worker.service.methods_stack.read().unwrap().len());
assert_eq!(0, worker.service.methods_stack.read().unwrap()[0].method_num);
assert_eq!([7, 7, 6, 6], worker.service.methods_stack.read().unwrap()[0].params[..]);
}
#[test]
fn worker_can_poll_long() {
let url = "ipc:///tmp/parity-test40.ipc";
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
worker.add_duplex(url).unwrap();
let message = [0u8; 1024*1024];
let (_socket, _endpoint) = dummy_write(url, &message);
worker.poll();
assert_eq!(1, worker.service.methods_stack.read().unwrap().len());
assert_eq!(0, worker.service.methods_stack.read().unwrap()[0].method_num);
assert_eq!(vec![0u8; 1024*1024-2], worker.service.methods_stack.read().unwrap()[0].params);
}
}
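To show how the new crate is meant to be driven, a small, illustrative usage sketch follows; `MyService` stands in for any type implementing `IpcInterface` (for example one produced by the codegen changes earlier in this commit), and the socket path is a placeholder:

// Sketch only: hosting a service over the nanomsg Worker defined above.
extern crate ethcore_ipc_nano as nanoipc;

use std::sync::Arc;
use nanoipc::Worker;

fn run_ipc_host(service: Arc<MyService>) {
	let mut worker = Worker::new(service);
	// Bind one duplex (Pair) endpoint; add_duplex may be called again for more sockets.
	worker.add_duplex("ipc:///tmp/parity-example.ipc").unwrap();
	loop {
		// Reads a <method_num, payload> frame, dispatches it via IpcInterface::dispatch_buf,
		// and writes the serialized reply back (see Worker::poll above).
		worker.poll();
	}
}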


@ -21,8 +21,12 @@ use std::marker::Sync;
use std::sync::atomic::*; use std::sync::atomic::*;
pub trait IpcInterface<T> { pub trait IpcInterface<T> {
-/// reads the message from io, dispatches the call and returns result
+/// reads the message from io, dispatches the call and returns serialized result
fn dispatch<R>(&self, r: &mut R) -> Vec<u8> where R: Read; fn dispatch<R>(&self, r: &mut R) -> Vec<u8> where R: Read;
/// deserialize the payload from buffer, dispatches invoke and returns serialized result
/// (for non-blocking io)
fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8>;
} }
/// serializes method invocation (method_num and parameters) to the stream specified by `w` /// serializes method invocation (method_num and parameters) to the stream specified by `w`


@ -10,7 +10,7 @@ rustc-serialize = "0.3"
serde = "0.7.0" serde = "0.7.0"
serde_json = "0.7.0" serde_json = "0.7.0"
serde_macros = { version = "0.7.0", optional = true } serde_macros = { version = "0.7.0", optional = true }
-clippy = { version = "0.0.54", optional = true }
+clippy = { version = "0.0.61", optional = true}
[build-dependencies] [build-dependencies]
serde_codegen = { version = "0.7.0", optional = true } serde_codegen = { version = "0.7.0", optional = true }


@ -17,7 +17,7 @@ log = "0.3"
env_logger = "0.3" env_logger = "0.3"
rustc-serialize = "0.3" rustc-serialize = "0.3"
rayon = "0.3.1" rayon = "0.3.1"
-clippy = { version = "0.0.54", optional = true }
+clippy = { version = "0.0.61", optional = true}
[features] [features]
default = [] default = []


@ -105,9 +105,15 @@ pub trait MinerService : Send + Sync {
/// Get the sealing work package and if `Some`, apply some transform. /// Get the sealing work package and if `Some`, apply some transform.
fn map_sealing_work<F, T>(&self, chain: &BlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T; fn map_sealing_work<F, T>(&self, chain: &BlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T;
-/// Query pending transactions for hash
+/// Query pending transactions for hash.
fn transaction(&self, hash: &H256) -> Option<SignedTransaction>; fn transaction(&self, hash: &H256) -> Option<SignedTransaction>;
/// Get a list of all pending transactions.
fn pending_transactions(&self) -> Vec<SignedTransaction>;
/// Returns highest transaction nonce for given address.
fn last_nonce(&self, address: &Address) -> Option<U256>;
/// Suggested gas price /// Suggested gas price
fn sensible_gas_price(&self) -> U256 { x!(20000000000u64) } fn sensible_gas_price(&self) -> U256 { x!(20000000000u64) }
} }


@ -94,6 +94,7 @@ impl Miner {
} }
/// Prepares new block for sealing including top transactions from queue. /// Prepares new block for sealing including top transactions from queue.
#[cfg_attr(feature="dev", allow(match_same_arms))]
fn prepare_sealing(&self, chain: &BlockChainClient) { fn prepare_sealing(&self, chain: &BlockChainClient) {
trace!(target: "miner", "prepare_sealing: entering"); trace!(target: "miner", "prepare_sealing: entering");
let transactions = self.transaction_queue.lock().unwrap().top_transactions(); let transactions = self.transaction_queue.lock().unwrap().top_transactions();
@ -164,7 +165,7 @@ impl Miner {
} }
); );
if let Some(block) = b { if let Some(block) = b {
-if sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash() != block.block().fields().header.hash()).unwrap_or(true) {
+if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
sealing_work.push(block); sealing_work.push(block);
} }
@ -200,7 +201,7 @@ impl MinerService for Miner {
fn sensible_gas_price(&self) -> U256 { fn sensible_gas_price(&self) -> U256 {
// 10% above our minimum. // 10% above our minimum.
-self.transaction_queue.lock().unwrap().minimal_gas_price().clone() * x!(110) / x!(100)
+*self.transaction_queue.lock().unwrap().minimal_gas_price() * x!(110) / x!(100)
} }
fn author(&self) -> Address { fn author(&self) -> Address {
@ -227,6 +228,15 @@ impl MinerService for Miner {
queue.find(hash) queue.find(hash)
} }
fn pending_transactions(&self) -> Vec<SignedTransaction> {
let queue = self.transaction_queue.lock().unwrap();
queue.top_transactions()
}
fn last_nonce(&self, address: &Address) -> Option<U256> {
self.transaction_queue.lock().unwrap().last_nonce(address)
}
fn update_sealing(&self, chain: &BlockChainClient) { fn update_sealing(&self, chain: &BlockChainClient) {
if self.sealing_enabled.load(atomic::Ordering::Relaxed) { if self.sealing_enabled.load(atomic::Ordering::Relaxed) {
let current_no = chain.chain_info().best_block_number; let current_no = chain.chain_info().best_block_number;


@ -18,7 +18,7 @@
//! Transaction Queue //! Transaction Queue
//! //!
-//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions
+//! `TransactionQueue` keeps track of all transactions seen by the node (received from other peers) and own transactions
//! and orders them by priority. Top priority transactions are those with low nonce height (difference between //! and orders them by priority. Top priority transactions are those with low nonce height (difference between
//! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used //! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used
//! for comparison (higher gas price = higher priority). //! for comparison (higher gas price = higher priority).
@ -179,7 +179,7 @@ impl VerifiedTransaction {
/// Holds transactions accessible by (address, nonce) and by priority /// Holds transactions accessible by (address, nonce) and by priority
/// ///
-/// TransactionSet keeps number of entries below limit, but it doesn't
+/// `TransactionSet` keeps number of entries below limit, but it doesn't
/// automatically happen during `insert/remove` operations. /// automatically happen during `insert/remove` operations.
/// You have to call `enforce_limit` to remove lowest priority transactions from set. /// You have to call `enforce_limit` to remove lowest priority transactions from set.
struct TransactionSet { struct TransactionSet {
@ -262,7 +262,7 @@ pub struct AccountDetails {
/// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue. /// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
const GAS_LIMIT_HYSTERESIS: usize = 10; // % const GAS_LIMIT_HYSTERESIS: usize = 10; // %
-/// TransactionQueue implementation
+/// `TransactionQueue` implementation
pub struct TransactionQueue { pub struct TransactionQueue {
/// Gas Price threshold for transactions that can be imported to this queue (defaults to 0) /// Gas Price threshold for transactions that can be imported to this queue (defaults to 0)
minimal_gas_price: U256, minimal_gas_price: U256,
@ -523,6 +523,11 @@ impl TransactionQueue {
self.last_nonces.clear(); self.last_nonces.clear();
} }
/// Returns highest transaction nonce for given address.
pub fn last_nonce(&self, address: &Address) -> Option<U256> {
self.last_nonces.get(address).cloned()
}
/// Checks if there are any transactions in `future` that should actually be promoted to `current` /// Checks if there are any transactions in `future` that should actually be promoted to `current`
/// (because nonce matches). /// (because nonce matches).
fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) {
@ -1255,4 +1260,29 @@ mod test {
assert_eq!(stats.future, 0); assert_eq!(stats.future, 0);
assert_eq!(stats.pending, 1); assert_eq!(stats.pending, 1);
} }
#[test]
fn should_return_none_when_transaction_from_given_address_does_not_exist() {
// given
let mut txq = TransactionQueue::new();
// then
assert_eq!(txq.last_nonce(&Address::default()), None);
}
#[test]
fn should_return_correct_nonce_when_transactions_from_given_address_exist() {
// given
let mut txq = TransactionQueue::new();
let tx = new_tx();
let from = tx.sender().unwrap();
let nonce = tx.nonce;
let details = |a: &Address| AccountDetails { nonce: nonce, balance: !U256::zero() };
// when
txq.add(tx, &details).unwrap();
// then
assert_eq!(txq.last_nonce(&from), Some(nonce));
}
} }


@ -19,6 +19,7 @@
#![warn(missing_docs)] #![warn(missing_docs)]
#![cfg_attr(feature="dev", feature(plugin))] #![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))] #![cfg_attr(feature="dev", plugin(clippy))]
#![cfg_attr(feature="dev", allow(useless_format))]
extern crate docopt; extern crate docopt;
extern crate num_cpus; extern crate num_cpus;
extern crate rustc_serialize; extern crate rustc_serialize;
@ -41,6 +42,8 @@ extern crate rpassword;
#[cfg(feature = "rpc")] #[cfg(feature = "rpc")]
extern crate ethcore_rpc as rpc; extern crate ethcore_rpc as rpc;
#[cfg(feature = "webapp")]
extern crate ethcore_webapp as webapp;
use std::io::{BufRead, BufReader}; use std::io::{BufRead, BufReader};
use std::fs::File; use std::fs::File;
@ -62,6 +65,10 @@ use ethminer::{Miner, MinerService};
use docopt::Docopt; use docopt::Docopt;
use daemonize::Daemonize; use daemonize::Daemonize;
use number_prefix::{binary_prefix, Standalone, Prefixed}; use number_prefix::{binary_prefix, Standalone, Prefixed};
#[cfg(feature = "rpc")]
use rpc::Server as RpcServer;
#[cfg(feature = "webapp")]
use webapp::Listening as WebappServer;
mod price_info; mod price_info;
@ -82,7 +89,7 @@ Parity. Ethereum Client.
Usage: Usage:
parity daemon <pid-file> [options] parity daemon <pid-file> [options]
parity account (new | list) parity account (new | list) [options]
parity [options] parity [options]
Protocol Options: Protocol Options:
@ -93,7 +100,7 @@ Protocol Options:
-d --db-path PATH Specify the database & configuration directory path -d --db-path PATH Specify the database & configuration directory path
[default: $HOME/.parity]. [default: $HOME/.parity].
--keys-path PATH Specify the path for JSON key files to be found --keys-path PATH Specify the path for JSON key files to be found
[default: $HOME/.web3/keys]. [default: $HOME/.parity/keys].
--identity NAME Specify your node's name. --identity NAME Specify your node's name.
Account Options: Account Options:
@ -117,7 +124,7 @@ Networking Options:
string or input to SHA3 operation. string or input to SHA3 operation.
API and Console Options: API and Console Options:
-j --jsonrpc Enable the JSON-RPC API sever. -j --jsonrpc Enable the JSON-RPC API server.
--jsonrpc-interface IP Specify the hostname portion of the JSONRPC API --jsonrpc-interface IP Specify the hostname portion of the JSONRPC API
server, IP should be an interface's IP address, or server, IP should be an interface's IP address, or
all (all interfaces) or local [default: local]. all (all interfaces) or local [default: local].
@ -129,6 +136,13 @@ API and Console Options:
interface. APIS is a comma-delimited list of API interface. APIS is a comma-delimited list of API
name. Possible name are web3, eth and net. name. Possible name are web3, eth and net.
[default: web3,eth,net,personal]. [default: web3,eth,net,personal].
-w --webapp Enable the web applications server (e.g. status page).
--webapp-port PORT Specify the port portion of the WebApps server
[default: 8080].
--webapp-interface IP Specify the hostname portion of the WebApps
server, IP should be an interface's IP address, or
all (all interfaces) or local [default: local].
Sealing/Mining Options: Sealing/Mining Options:
--usd-per-tx USD Amount of USD to be paid for a basic transaction --usd-per-tx USD Amount of USD to be paid for a basic transaction
@ -213,6 +227,9 @@ struct Args {
flag_jsonrpc_port: u16, flag_jsonrpc_port: u16,
flag_jsonrpc_cors: String, flag_jsonrpc_cors: String,
flag_jsonrpc_apis: String, flag_jsonrpc_apis: String,
flag_webapp: bool,
flag_webapp_port: u16,
flag_webapp_interface: String,
flag_author: String, flag_author: String,
flag_usd_per_tx: String, flag_usd_per_tx: String,
flag_usd_per_eth: String, flag_usd_per_eth: String,
@ -269,10 +286,10 @@ fn setup_rpc_server(
sync: Arc<EthSync>, sync: Arc<EthSync>,
secret_store: Arc<AccountService>, secret_store: Arc<AccountService>,
miner: Arc<Miner>, miner: Arc<Miner>,
url: &str, url: &SocketAddr,
cors_domain: &str, cors_domain: &str,
apis: Vec<&str> apis: Vec<&str>
) -> Option<Arc<PanicHandler>> { ) -> RpcServer {
use rpc::v1::*; use rpc::v1::*;
let server = rpc::RpcServer::new(); let server = rpc::RpcServer::new();
@ -290,8 +307,41 @@ fn setup_rpc_server(
} }
} }
} }
Some(server.start_http(url, cors_domain, ::num_cpus::get())) let start_result = server.start_http(url, cors_domain);
match start_result {
Err(rpc::RpcServerError::IoError(err)) => die_with_io_error(err),
Err(e) => die!("{:?}", e),
Ok(server) => server,
} }
}
#[cfg(feature = "webapp")]
fn setup_webapp_server(
client: Arc<Client>,
sync: Arc<EthSync>,
secret_store: Arc<AccountService>,
miner: Arc<Miner>,
url: &str
) -> WebappServer {
use rpc::v1::*;
let server = webapp::WebappServer::new();
server.add_delegate(Web3Client::new().to_delegate());
server.add_delegate(NetClient::new(&sync).to_delegate());
server.add_delegate(EthClient::new(&client, &sync, &secret_store, &miner).to_delegate());
server.add_delegate(EthFilterClient::new(&client, &miner).to_delegate());
server.add_delegate(PersonalClient::new(&secret_store).to_delegate());
let start_result = server.start_http(url, ::num_cpus::get());
match start_result {
Err(webapp::WebappServerError::IoError(err)) => die_with_io_error(err),
Err(e) => die!("{:?}", e),
Ok(handle) => handle,
}
}
#[cfg(not(feature = "rpc"))]
struct RpcServer;
#[cfg(not(feature = "rpc"))] #[cfg(not(feature = "rpc"))]
fn setup_rpc_server( fn setup_rpc_server(
@ -302,8 +352,22 @@ fn setup_rpc_server(
_url: &str, _url: &str,
_cors_domain: &str, _cors_domain: &str,
_apis: Vec<&str> _apis: Vec<&str>
) -> Option<Arc<PanicHandler>> { ) -> ! {
None die!("Your Parity version has been compiled without JSON-RPC support.")
}
#[cfg(not(feature = "webapp"))]
struct WebappServer;
#[cfg(not(feature = "webapp"))]
fn setup_webapp_server(
_client: Arc<Client>,
_sync: Arc<EthSync>,
_secret_store: Arc<AccountService>,
_miner: Arc<Miner>,
_url: &str
) -> ! {
die!("Your Parity version has been compiled without WebApps support.")
} }
fn print_version() { fn print_version() {
@ -361,9 +425,9 @@ impl Configuration {
die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx) die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx)
}); });
let usd_per_eth = match self.args.flag_usd_per_eth.as_str() { let usd_per_eth = match self.args.flag_usd_per_eth.as_str() {
"etherscan" => price_info::PriceInfo::get().map(|x| x.ethusd).unwrap_or_else(|| { "etherscan" => price_info::PriceInfo::get().map_or_else(|| {
die!("Unable to retrieve USD value of ETH from etherscan. Rerun with a different value for --usd-per-eth.") die!("Unable to retrieve USD value of ETH from etherscan. Rerun with a different value for --usd-per-eth.")
}), }, |x| x.ethusd),
x => FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. Must be a decimal number.", x)) x => FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. Must be a decimal number.", x))
}; };
let wei_per_usd: f32 = 1.0e18 / usd_per_eth; let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
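For a concrete feel of this conversion (the ETH price below is an assumed figure, not a real quote):

fn main() {
    let usd_per_eth: f32 = 10.0;             // assume 1 ETH trades at 10 USD
    let wei_per_usd = 1.0e18 / usd_per_eth;  // same formula as the line above
    println!("{} wei per USD", wei_per_usd); // roughly 1e17, i.e. a tenth of an ether in wei
}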
@ -383,7 +447,7 @@ impl Configuration {
} }
} }
fn _keys_path(&self) -> String { fn keys_path(&self) -> String {
self.args.flag_keys_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) self.args.flag_keys_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
} }
@ -421,7 +485,6 @@ impl Configuration {
} }
} }
#[cfg_attr(feature="dev", allow(useless_format))]
fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) { fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port)); let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port));
let public_address = if self.args.flag_nat.starts_with("extip:") { let public_address = if self.args.flag_nat.starts_with("extip:") {
@ -450,7 +513,6 @@ impl Configuration {
ret ret
} }
#[cfg_attr(feature="dev", allow(useless_format))]
fn client_config(&self) -> ClientConfig { fn client_config(&self) -> ClientConfig {
let mut client_config = ClientConfig::default(); let mut client_config = ClientConfig::default();
match self.args.flag_cache { match self.args.flag_cache {
@ -505,7 +567,7 @@ impl Configuration {
fn execute_account_cli(&self) { fn execute_account_cli(&self) {
use util::keys::store::SecretStore; use util::keys::store::SecretStore;
use rpassword::read_password; use rpassword::read_password;
let mut secret_store = SecretStore::new(); let mut secret_store = SecretStore::new_in(Path::new(&self.keys_path()));
if self.args.cmd_new { if self.args.cmd_new {
println!("Please note that password is NOT RECOVERABLE."); println!("Please note that password is NOT RECOVERABLE.");
println!("Type password: "); println!("Type password: ");
@ -539,7 +601,7 @@ impl Configuration {
.into_iter() .into_iter()
}).collect::<Vec<_>>(); }).collect::<Vec<_>>();
let account_service = AccountService::new(); let account_service = AccountService::new_in(Path::new(&self.keys_path()));
for d in &self.args.flag_unlock { for d in &self.args.flag_unlock {
let a = Address::from_str(clean_0x(&d)).unwrap_or_else(|_| { let a = Address::from_str(clean_0x(&d)).unwrap_or_else(|_| {
die!("{}: Invalid address for --unlock. Must be 40 hex characters, without the 0x at the beginning.", d) die!("{}: Invalid address for --unlock. Must be 40 hex characters, without the 0x at the beginning.", d)
@ -551,7 +613,6 @@ impl Configuration {
account_service account_service
} }
#[cfg_attr(feature="dev", allow(useless_format))]
fn execute_client(&self) { fn execute_client(&self) {
// Setup panic handler // Setup panic handler
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
@ -569,7 +630,10 @@ impl Configuration {
let account_service = Arc::new(self.account_service()); let account_service = Arc::new(self.account_service());
// Build client // Build client
let mut service = ClientService::start(self.client_config(), spec, net_settings, &Path::new(&self.path())).unwrap(); let mut service = ClientService::start(
self.client_config(), spec, net_settings, &Path::new(&self.path())
).unwrap_or_else(|e| die_with_error(e));
panic_handler.forward_from(&service); panic_handler.forward_from(&service);
let client = service.client(); let client = service.client();
@ -584,7 +648,8 @@ impl Configuration {
let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone()); let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone());
// Setup rpc // Setup rpc
if self.args.flag_jsonrpc || self.args.flag_rpc { let rpc_server = if self.args.flag_jsonrpc || self.args.flag_rpc {
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
let url = format!("{}:{}", let url = format!("{}:{}",
match self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_interface).as_str() { match self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_interface).as_str() {
"all" => "0.0.0.0", "all" => "0.0.0.0",
@ -593,23 +658,41 @@ impl Configuration {
}, },
self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port)
); );
SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url));
let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); let cors_domain = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
// TODO: use this as the API list.
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); Some(setup_rpc_server(
let server_handler = setup_rpc_server( service.client(),
sync.clone(),
account_service.clone(),
miner.clone(),
&addr,
&cors_domain,
apis.split(',').collect()
))
} else {
None
};
let webapp_server = if self.args.flag_webapp {
let url = format!("{}:{}",
match self.args.flag_webapp_interface.as_str() {
"all" => "0.0.0.0",
"local" => "127.0.0.1",
x => x,
},
self.args.flag_webapp_port
);
Some(setup_webapp_server(
service.client(), service.client(),
sync.clone(), sync.clone(),
account_service.clone(), account_service.clone(),
miner.clone(), miner.clone(),
&url, &url,
cors, ))
apis.split(',').collect() } else {
); None
if let Some(handler) = server_handler { };
panic_handler.forward_from(handler.deref());
}
}
// Register IO handler // Register IO handler
let io_handler = Arc::new(ClientIoHandler { let io_handler = Arc::new(ClientIoHandler {
@ -621,11 +704,11 @@ impl Configuration {
service.io().register_handler(io_handler).expect("Error registering IO handler"); service.io().register_handler(io_handler).expect("Error registering IO handler");
// Handle exit // Handle exit
wait_for_exit(panic_handler); wait_for_exit(panic_handler, rpc_server, webapp_server);
} }
} }
fn wait_for_exit(panic_handler: Arc<PanicHandler>) { fn wait_for_exit(panic_handler: Arc<PanicHandler>, _rpc_server: Option<RpcServer>, _webapp_server: Option<WebappServer>) {
let exit = Arc::new(Condvar::new()); let exit = Arc::new(Condvar::new());
// Handle possible exits // Handle possible exits
@ -639,6 +722,30 @@ fn wait_for_exit(panic_handler: Arc<PanicHandler>) {
// Wait for signal // Wait for signal
let mutex = Mutex::new(()); let mutex = Mutex::new(());
let _ = exit.wait(mutex.lock().unwrap()).unwrap(); let _ = exit.wait(mutex.lock().unwrap()).unwrap();
info!("Finishing work, please wait...");
}
fn die_with_error(e: ethcore::error::Error) -> ! {
use ethcore::error::Error;
match e {
Error::Util(UtilError::StdIo(e)) => die_with_io_error(e),
_ => die!("{:?}", e),
}
}
fn die_with_io_error(e: std::io::Error) -> ! {
match e.kind() {
std::io::ErrorKind::PermissionDenied => {
die!("No permissions to bind to specified port.")
},
std::io::ErrorKind::AddrInUse => {
die!("Specified address is already in use. Please make sure that nothing is listening on the same port or try using a different one.")
},
std::io::ErrorKind::AddrNotAvailable => {
die!("Could not use specified interface or given address is invalid.")
},
_ => die!("{:?}", e),
}
} }
fn main() { fn main() {


@ -19,8 +19,8 @@ impl PriceInfo {
.and_then(|mut s| s.read_to_string(&mut body).ok()) .and_then(|mut s| s.read_to_string(&mut body).ok())
.and_then(|_| Json::from_str(&body).ok()) .and_then(|_| Json::from_str(&body).ok())
.and_then(|json| json.find_path(&["result", "ethusd"]) .and_then(|json| json.find_path(&["result", "ethusd"])
.and_then(|obj| match obj { .and_then(|obj| match *obj {
&Json::String(ref s) => Some(PriceInfo { Json::String(ref s) => Some(PriceInfo {
ethusd: FromStr::from_str(&s).unwrap() ethusd: FromStr::from_str(&s).unwrap()
}), }),
_ => None _ => None


@ -13,7 +13,7 @@ log = "0.3"
serde = "0.7.0" serde = "0.7.0"
serde_json = "0.7.0" serde_json = "0.7.0"
jsonrpc-core = "2.0" jsonrpc-core = "2.0"
jsonrpc-http-server = "3.0" jsonrpc-http-server = { git = "https://github.com/debris/jsonrpc-http-server.git" }
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
ethash = { path = "../ethash" } ethash = { path = "../ethash" }
@ -22,7 +22,7 @@ ethminer = { path = "../miner" }
rustc-serialize = "0.3" rustc-serialize = "0.3"
transient-hashmap = "0.1" transient-hashmap = "0.1"
serde_macros = { version = "0.7.0", optional = true } serde_macros = { version = "0.7.0", optional = true }
clippy = { version = "0.0.54", optional = true } clippy = { version = "0.0.61", optional = true}
[build-dependencies] [build-dependencies]
serde_codegen = { version = "0.7.0", optional = true } serde_codegen = { version = "0.7.0", optional = true }


@ -33,10 +33,10 @@ extern crate ethminer;
extern crate transient_hashmap; extern crate transient_hashmap;
use std::sync::Arc; use std::sync::Arc;
use std::thread; use std::net::SocketAddr;
use util::panics::PanicHandler;
use self::jsonrpc_core::{IoHandler, IoDelegate}; use self::jsonrpc_core::{IoHandler, IoDelegate};
pub use jsonrpc_http_server::{Server, RpcServerError};
pub mod v1; pub mod v1;
/// Http server. /// Http server.
@ -45,7 +45,7 @@ pub struct RpcServer {
} }
impl RpcServer { impl RpcServer {
/// Construct new http server object with given number of threads. /// Construct new http server object.
pub fn new() -> RpcServer { pub fn new() -> RpcServer {
RpcServer { RpcServer {
handler: Arc::new(IoHandler::new()), handler: Arc::new(IoHandler::new()),
@ -57,18 +57,9 @@ impl RpcServer {
self.handler.add_delegate(delegate); self.handler.add_delegate(delegate);
} }
/// Start server asynchronously in new thread and returns panic handler. /// Start server asynchronously and returns result with `Server` handle on success or an error.
pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc<PanicHandler> { pub fn start_http(&self, addr: &SocketAddr, cors_domain: &str) -> Result<Server, RpcServerError> {
let addr = addr.to_owned();
let cors_domain = cors_domain.to_owned(); let cors_domain = cors_domain.to_owned();
let panic_handler = PanicHandler::new_in_arc(); Server::start(addr, self.handler.clone(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain))
let ph = panic_handler.clone();
let server = jsonrpc_http_server::Server::new(self.handler.clone());
thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || {
ph.catch_panic(move || {
server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads);
}).unwrap()
}).expect("Error while creating jsonrpc http thread");
panic_handler
} }
} }
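A minimal sketch of how a caller might drive the new `start_http` signature, assuming the `RpcServer` type from this file is in scope and that the address and CORS value are placeholders:

use std::net::SocketAddr;
use std::str::FromStr;

fn start(server: RpcServer) {
    // hypothetical address; any interface/port pair works here
    let addr = SocketAddr::from_str("127.0.0.1:8545").expect("valid socket address");
    match server.start_http(&addr, "null") {
        Ok(_handle) => { /* keep the handle alive for as long as the server should run */ }
        Err(e) => panic!("could not start the JSON-RPC server: {:?}", e),
    }
}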


@ -43,6 +43,10 @@ fn default_gas() -> U256 {
U256::from(21_000) U256::from(21_000)
} }
fn default_call_gas() -> U256 {
U256::from(50_000_000)
}
/// Eth rpc implementation. /// Eth rpc implementation.
pub struct EthClient<C, S, A, M, EM = ExternalMiner> pub struct EthClient<C, S, A, M, EM = ExternalMiner>
where C: BlockChainClient, where C: BlockChainClient,
@ -175,27 +179,30 @@ impl<C, S, A, M, EM> EthClient<C, S, A, M, EM>
Ok(EthTransaction { Ok(EthTransaction {
nonce: request.nonce.unwrap_or_else(|| client.nonce(&from)), nonce: request.nonce.unwrap_or_else(|| client.nonce(&from)),
action: request.to.map_or(Action::Create, Action::Call), action: request.to.map_or(Action::Create, Action::Call),
gas: request.gas.unwrap_or_else(default_gas), gas: request.gas.unwrap_or_else(default_call_gas),
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()), gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
value: request.value.unwrap_or_else(U256::zero), value: request.value.unwrap_or_else(U256::zero),
data: request.data.map_or_else(Vec::new, |d| d.to_vec()) data: request.data.map_or_else(Vec::new, |d| d.to_vec())
}.fake_sign(from)) }.fake_sign(from))
} }
fn dispatch_transaction(&self, signed_transaction: SignedTransaction, raw_transaction: Vec<u8>) -> Result<Value, Error> { fn dispatch_transaction(&self, signed_transaction: SignedTransaction) -> Result<Value, Error> {
let hash = signed_transaction.hash(); let hash = signed_transaction.hash();
let import = { let import = {
let miner = take_weak!(self.miner);
let client = take_weak!(self.client); let client = take_weak!(self.client);
take_weak!(self.miner).import_transactions(vec![signed_transaction], |a: &Address| AccountDetails { take_weak!(self.miner).import_transactions(vec![signed_transaction], |a: &Address| AccountDetails {
nonce: client.nonce(a), nonce: miner
.last_nonce(a)
.map(|nonce| nonce + U256::one())
.unwrap_or_else(|| client.nonce(a)),
balance: client.balance(a), balance: client.balance(a),
}) })
}; };
match import.into_iter().collect::<Result<Vec<_>, _>>() { match import.into_iter().collect::<Result<Vec<_>, _>>() {
Ok(_) => { Ok(_) => {
take_weak!(self.sync).new_transaction(raw_transaction);
to_value(&hash) to_value(&hash)
} }
Err(e) => { Err(e) => {
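The nonce selection introduced in this hunk (take the miner's last pending nonce plus one, falling back to the on-chain nonce only when the miner has seen nothing from that sender) can be illustrated in isolation; the helper below is an assumption for illustration, not the crate's API:

fn next_nonce(miner_last: Option<u64>, chain_nonce: u64) -> u64 {
    miner_last.map(|n| n + 1).unwrap_or(chain_nonce)
}

fn main() {
    assert_eq!(next_nonce(None, 5), 5);    // nothing pending: use the on-chain nonce
    assert_eq!(next_nonce(Some(7), 5), 8); // pending txs exist: queue right after the last one
}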
@ -484,7 +491,11 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
let client = take_weak!(self.client); let client = take_weak!(self.client);
let miner = take_weak!(self.miner); let miner = take_weak!(self.miner);
EthTransaction { EthTransaction {
nonce: request.nonce.unwrap_or_else(|| client.nonce(&request.from)), nonce: request.nonce
.or_else(|| miner
.last_nonce(&request.from)
.map(|nonce| nonce + U256::one()))
.unwrap_or_else(|| client.nonce(&request.from)),
action: request.to.map_or(Action::Create, Action::Call), action: request.to.map_or(Action::Create, Action::Call),
gas: request.gas.unwrap_or_else(default_gas), gas: request.gas.unwrap_or_else(default_gas),
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()), gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
@ -492,8 +503,7 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
data: request.data.map_or_else(Vec::new, |d| d.to_vec()), data: request.data.map_or_else(Vec::new, |d| d.to_vec()),
}.sign(&secret) }.sign(&secret)
}; };
let raw_transaction = encode(&signed_transaction).to_vec(); self.dispatch_transaction(signed_transaction)
self.dispatch_transaction(signed_transaction, raw_transaction)
}, },
Err(_) => { to_value(&H256::zero()) } Err(_) => { to_value(&H256::zero()) }
} }
@ -505,7 +515,7 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
.and_then(|(raw_transaction, )| { .and_then(|(raw_transaction, )| {
let raw_transaction = raw_transaction.to_vec(); let raw_transaction = raw_transaction.to_vec();
match UntrustedRlp::new(&raw_transaction).as_val() { match UntrustedRlp::new(&raw_transaction).as_val() {
Ok(signed_transaction) => self.dispatch_transaction(signed_transaction, raw_transaction), Ok(signed_transaction) => self.dispatch_transaction(signed_transaction),
Err(_) => to_value(&H256::zero()), Err(_) => to_value(&H256::zero()),
} }
}) })


@ -49,7 +49,7 @@ impl<A> Personal for PersonalClient<A> where A: AccountProvider + 'static {
|(pass, )| { |(pass, )| {
let store = take_weak!(self.accounts); let store = take_weak!(self.accounts);
match store.new_account(&pass) { match store.new_account(&pass) {
Ok(address) => Ok(Value::String(format!("0x{:?}", address))), Ok(address) => to_value(&address),
Err(_) => Err(Error::internal_error()) Err(_) => Err(Error::internal_error())
} }
} }


@ -23,6 +23,7 @@ use util::numbers::{Uint, U256};
use ethcore::client::{TestBlockChainClient, EachBlockWith, Executed, TransactionId}; use ethcore::client::{TestBlockChainClient, EachBlockWith, Executed, TransactionId};
use ethcore::log_entry::{LocalizedLogEntry, LogEntry}; use ethcore::log_entry::{LocalizedLogEntry, LogEntry};
use ethcore::receipt::LocalizedReceipt; use ethcore::receipt::LocalizedReceipt;
use ethcore::transaction::{Transaction, Action};
use v1::{Eth, EthClient}; use v1::{Eth, EthClient};
use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService, TestExternalMiner}; use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService, TestExternalMiner};
@ -52,7 +53,7 @@ fn miner_service() -> Arc<TestMinerService> {
struct EthTester { struct EthTester {
pub client: Arc<TestBlockChainClient>, pub client: Arc<TestBlockChainClient>,
pub sync: Arc<TestSyncProvider>, pub sync: Arc<TestSyncProvider>,
_accounts_provider: Arc<TestAccountProvider>, pub accounts_provider: Arc<TestAccountProvider>,
miner: Arc<TestMinerService>, miner: Arc<TestMinerService>,
hashrates: Arc<RwLock<HashMap<H256, U256>>>, hashrates: Arc<RwLock<HashMap<H256, U256>>>,
pub io: IoHandler, pub io: IoHandler,
@ -72,7 +73,7 @@ impl Default for EthTester {
EthTester { EthTester {
client: client, client: client,
sync: sync, sync: sync,
_accounts_provider: ap, accounts_provider: ap,
miner: miner, miner: miner,
io: io, io: io,
hashrates: hashrates, hashrates: hashrates,
@ -453,9 +454,53 @@ fn rpc_eth_estimate_gas_default_block() {
} }
#[test] #[test]
#[ignore]
fn rpc_eth_send_transaction() { fn rpc_eth_send_transaction() {
unimplemented!() let account = TestAccount::new("123");
let address = account.address();
let secret = account.secret.clone();
let tester = EthTester::default();
tester.accounts_provider.accounts.write().unwrap().insert(address.clone(), account);
let request = r#"{
"jsonrpc": "2.0",
"method": "eth_sendTransaction",
"params": [{
"from": ""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a"
}],
"id": 1
}"#;
let t = Transaction {
nonce: U256::zero(),
gas_price: U256::from(0x9184e72a000u64),
gas: U256::from(0x76c0),
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
value: U256::from(0x9184e72au64),
data: vec![]
}.sign(&secret);
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":1}"#;
assert_eq!(tester.io.handle_request(request.as_ref()), Some(response));
tester.miner.last_nonces.write().unwrap().insert(address.clone(), U256::zero());
let t = Transaction {
nonce: U256::one(),
gas_price: U256::from(0x9184e72a000u64),
gas: U256::from(0x76c0),
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
value: U256::from(0x9184e72au64),
data: vec![]
}.sign(&secret);
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":1}"#;
assert_eq!(tester.io.handle_request(request.as_ref()), Some(response));
} }
#[test] #[test]


@ -20,7 +20,7 @@ use std::sync::RwLock;
use std::collections::HashMap; use std::collections::HashMap;
use std::io; use std::io;
use util::hash::{Address, H256, FixedHash}; use util::hash::{Address, H256, FixedHash};
use util::crypto::{Secret, Signature}; use util::crypto::{Secret, Signature, KeyPair};
use util::keys::store::{AccountProvider, SigningError, EncryptedHashMapError}; use util::keys::store::{AccountProvider, SigningError, EncryptedHashMapError};
/// Account mock. /// Account mock.
@ -30,23 +30,31 @@ pub struct TestAccount {
pub unlocked: bool, pub unlocked: bool,
/// Account's password. /// Account's password.
pub password: String, pub password: String,
/// Account's secret.
pub secret: Secret,
} }
impl TestAccount { impl TestAccount {
/// Creates new test account. /// Creates new test account.
pub fn new(password: &str) -> Self { pub fn new(password: &str) -> Self {
let pair = KeyPair::create().unwrap();
TestAccount { TestAccount {
unlocked: false, unlocked: false,
password: password.to_owned(), password: password.to_owned(),
secret: pair.secret().clone()
} }
} }
/// Returns account address.
pub fn address(&self) -> Address {
KeyPair::from_secret(self.secret.clone()).unwrap().address()
}
} }
/// Test account provider. /// Test account provider.
pub struct TestAccountProvider { pub struct TestAccountProvider {
accounts: RwLock<HashMap<Address, TestAccount>>, /// Test provider accounts.
/// Added accounts passwords. pub accounts: RwLock<HashMap<Address, TestAccount>>,
pub adds: RwLock<Vec<String>>,
} }
impl TestAccountProvider { impl TestAccountProvider {
@ -54,7 +62,6 @@ impl TestAccountProvider {
pub fn new(accounts: HashMap<Address, TestAccount>) -> Self { pub fn new(accounts: HashMap<Address, TestAccount>) -> Self {
TestAccountProvider { TestAccountProvider {
accounts: RwLock::new(accounts), accounts: RwLock::new(accounts),
adds: RwLock::new(vec![]),
} }
} }
} }
@ -76,14 +83,20 @@ impl AccountProvider for TestAccountProvider {
} }
fn new_account(&self, pass: &str) -> Result<Address, io::Error> { fn new_account(&self, pass: &str) -> Result<Address, io::Error> {
let mut adds = self.adds.write().unwrap(); let account = TestAccount::new(pass);
let address = Address::from(adds.len() as u64 + 2); let address = KeyPair::from_secret(account.secret.clone()).unwrap().address();
adds.push(pass.to_owned()); self.accounts.write().unwrap().insert(address.clone(), account);
Ok(address) Ok(address)
} }
fn account_secret(&self, _account: &Address) -> Result<Secret, SigningError> { fn account_secret(&self, address: &Address) -> Result<Secret, SigningError> {
Ok(Secret::random()) // todo: consider checking if account is unlock. some test may need alteration then.
self.accounts
.read()
.unwrap()
.get(address)
.ok_or(SigningError::NoAccount)
.map(|acc| acc.secret.clone())
} }
fn sign(&self, _account: &Address, _message: &H256) -> Result<Signature, SigningError> { fn sign(&self, _account: &Address, _message: &H256) -> Result<Signature, SigningError> {


@ -16,7 +16,7 @@
//! Test implementation of miner service. //! Test implementation of miner service.
use util::{Address, H256, Bytes}; use util::{Address, H256, Bytes, U256};
use util::standard::*; use util::standard::*;
use ethcore::error::Error; use ethcore::error::Error;
use ethcore::client::BlockChainClient; use ethcore::client::BlockChainClient;
@ -27,19 +27,22 @@ use ethminer::{MinerService, MinerStatus, AccountDetails};
/// Test miner service. /// Test miner service.
pub struct TestMinerService { pub struct TestMinerService {
/// Imported transactions. /// Imported transactions.
pub imported_transactions: RwLock<Vec<H256>>, pub imported_transactions: Mutex<Vec<SignedTransaction>>,
/// Latest closed block. /// Latest closed block.
pub latest_closed_block: Mutex<Option<ClosedBlock>>, pub latest_closed_block: Mutex<Option<ClosedBlock>>,
/// Pre-existed pending transactions /// Pre-existed pending transactions
pub pending_transactions: Mutex<HashMap<H256, SignedTransaction>>, pub pending_transactions: Mutex<HashMap<H256, SignedTransaction>>,
/// Last nonces.
pub last_nonces: RwLock<HashMap<Address, U256>>,
} }
impl Default for TestMinerService { impl Default for TestMinerService {
fn default() -> TestMinerService { fn default() -> TestMinerService {
TestMinerService { TestMinerService {
imported_transactions: RwLock::new(Vec::new()), imported_transactions: Mutex::new(Vec::new()),
latest_closed_block: Mutex::new(None), latest_closed_block: Mutex::new(None),
pending_transactions: Mutex::new(HashMap::new()), pending_transactions: Mutex::new(HashMap::new()),
last_nonces: RwLock::new(HashMap::new()),
} }
} }
} }
@ -56,28 +59,56 @@ impl MinerService for TestMinerService {
} }
/// Imports transactions to transaction queue. /// Imports transactions to transaction queue.
fn import_transactions<T>(&self, _transactions: Vec<SignedTransaction>, _fetch_account: T) -> Vec<Result<(), Error>> fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, _fetch_account: T) -> Vec<Result<(), Error>>
where T: Fn(&Address) -> AccountDetails { unimplemented!(); } where T: Fn(&Address) -> AccountDetails {
// lets assume that all txs are valid
self.imported_transactions.lock().unwrap().extend_from_slice(&transactions);
transactions
.iter()
.map(|_| Ok(()))
.collect()
}
/// Returns hashes of transactions currently in pending /// Returns hashes of transactions currently in pending
fn pending_transactions_hashes(&self) -> Vec<H256> { vec![] } fn pending_transactions_hashes(&self) -> Vec<H256> {
vec![]
}
/// Removes all transactions from the queue and restart mining operation. /// Removes all transactions from the queue and restart mining operation.
fn clear_and_reset(&self, _chain: &BlockChainClient) { unimplemented!(); } fn clear_and_reset(&self, _chain: &BlockChainClient) {
unimplemented!();
}
/// Called when blocks are imported to chain, updates transactions queue. /// Called when blocks are imported to chain, updates transactions queue.
fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) { unimplemented!(); } fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) {
unimplemented!();
}
/// New chain head event. Restart mining operation. /// New chain head event. Restart mining operation.
fn update_sealing(&self, _chain: &BlockChainClient) { unimplemented!(); } fn update_sealing(&self, _chain: &BlockChainClient) {
unimplemented!();
}
fn map_sealing_work<F, T>(&self, _chain: &BlockChainClient, _f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T { unimplemented!(); } fn map_sealing_work<F, T>(&self, _chain: &BlockChainClient, _f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
unimplemented!();
}
fn transaction(&self, hash: &H256) -> Option<SignedTransaction> { fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
self.pending_transactions.lock().unwrap().get(hash).and_then(|tx_ref| Some(tx_ref.clone())) self.pending_transactions.lock().unwrap().get(hash).cloned()
}
fn pending_transactions(&self) -> Vec<SignedTransaction> {
self.pending_transactions.lock().unwrap().values().cloned().collect()
}
fn last_nonce(&self, address: &Address) -> Option<U256> {
self.last_nonces.read().unwrap().get(address).cloned()
} }
/// Submit `seal` as a valid solution for the header of `pow_hash`. /// Submit `seal` as a valid solution for the header of `pow_hash`.
/// Will check the seal, but not actually insert the block into the chain. /// Will check the seal, but not actually insert the block into the chain.
fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> { unimplemented!(); } fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
unimplemented!();
}
} }


@ -16,7 +16,7 @@
//! Test implementation of SyncProvider. //! Test implementation of SyncProvider.
use util::{U256, Bytes}; use util::{U256};
use ethsync::{SyncProvider, SyncStatus, SyncState}; use ethsync::{SyncProvider, SyncStatus, SyncState};
use std::sync::{RwLock}; use std::sync::{RwLock};
@ -59,8 +59,5 @@ impl SyncProvider for TestSyncProvider {
fn status(&self) -> SyncStatus { fn status(&self) -> SyncStatus {
self.status.read().unwrap().clone() self.status.read().unwrap().clone()
} }
fn new_transaction(&self, _raw_transaction: Bytes) {
}
} }


@ -22,8 +22,7 @@ use util::numbers::*;
use std::collections::*; use std::collections::*;
fn accounts_provider() -> Arc<TestAccountProvider> { fn accounts_provider() -> Arc<TestAccountProvider> {
let mut accounts = HashMap::new(); let accounts = HashMap::new();
accounts.insert(Address::from(1), TestAccount::new("test"));
let ap = TestAccountProvider::new(accounts); let ap = TestAccountProvider::new(accounts);
Arc::new(ap) Arc::new(ap)
} }
@ -38,7 +37,11 @@ fn setup() -> (Arc<TestAccountProvider>, IoHandler) {
#[test] #[test]
fn accounts() { fn accounts() {
let (_test_provider, io) = setup(); let (test_provider, io) = setup();
test_provider.accounts
.write()
.unwrap()
.insert(Address::from(1), TestAccount::new("test"));
let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#; let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#;
@ -49,11 +52,22 @@ fn accounts() {
#[test] #[test]
fn new_account() { fn new_account() {
let (_test_provider, io) = setup(); let (test_provider, io) = setup();
let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#; let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000002","id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned())); let res = io.handle_request(request);
let accounts = test_provider.accounts.read().unwrap();
assert_eq!(accounts.len(), 1);
let address = accounts
.keys()
.nth(0)
.cloned()
.unwrap();
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"","id":1}"#;
assert_eq!(res, Some(response));
} }


@ -190,9 +190,6 @@ pub trait EthFilter: Sized + Send + Sync + 'static {
/// Returns filter changes since last poll. /// Returns filter changes since last poll.
fn filter_changes(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } fn filter_changes(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
/// Returns filter logs.
fn filter_logs(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
/// Uninstalls filter. /// Uninstalls filter.
fn uninstall_filter(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } fn uninstall_filter(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
@ -203,7 +200,7 @@ pub trait EthFilter: Sized + Send + Sync + 'static {
delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter); delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter);
delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter); delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter);
delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes); delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes);
delegate.add_method("eth_getFilterLogs", EthFilter::filter_logs); delegate.add_method("eth_getFilterLogs", EthFilter::filter_changes);
delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter); delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter);
delegate delegate
} }


@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io"]
[dependencies] [dependencies]
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
clippy = { version = "0.0.54", optional = true } clippy = { version = "0.0.61", optional = true}
ethminer = { path = "../miner" } ethminer = { path = "../miner" }
log = "0.3" log = "0.3"
env_logger = "0.3" env_logger = "0.3"


@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// ///
/// BlockChain synchronization strategy. /// `BlockChain` synchronization strategy.
/// Syncs to peers and keeps up to date. /// Syncs to peers and keeps up to date.
/// This implementation uses ethereum protocol v63 /// This implementation uses ethereum protocol v63
/// ///
@ -127,7 +127,7 @@ pub struct SyncStatus {
pub protocol_version: u8, pub protocol_version: u8,
/// The underlying p2p network version. /// The underlying p2p network version.
pub network_id: U256, pub network_id: U256,
/// BlockChain height for the moment the sync started. /// `BlockChain` height for the moment the sync started.
pub start_block_number: BlockNumber, pub start_block_number: BlockNumber,
/// Last fully downloaded and imported block number (if any). /// Last fully downloaded and imported block number (if any).
pub last_imported_block_number: Option<BlockNumber>, pub last_imported_block_number: Option<BlockNumber>,
@ -217,10 +217,6 @@ pub struct ChainSync {
network_id: U256, network_id: U256,
/// Miner /// Miner
miner: Arc<Miner>, miner: Arc<Miner>,
/// Transactions to propagate
// TODO: reconsider where this is in the codebase - seems a little dodgy to have here.
transactions_to_send: Vec<Bytes>,
} }
type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>; type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
@ -247,7 +243,6 @@ impl ChainSync {
max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
network_id: config.network_id, network_id: config.network_id,
miner: miner, miner: miner,
transactions_to_send: vec![],
} }
} }
@ -950,11 +945,6 @@ impl ChainSync {
} }
} }
/// Place a new transaction on the wire.
pub fn new_transaction(&mut self, raw_transaction: Bytes) {
self.transactions_to_send.push(raw_transaction);
}
/// Called when peer sends us new transactions /// Called when peer sends us new transactions
fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
// accepting transactions once only fully synced // accepting transactions once only fully synced
@ -1292,15 +1282,20 @@ impl ChainSync {
fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize { fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize {
// Early out of nobody to send to. // Early out of nobody to send to.
if self.peers.len() == 0 { if self.peers.is_empty() {
return 0; return 0;
} }
let mut packet = RlpStream::new_list(self.transactions_to_send.len()); let mut transactions = self.miner.pending_transactions();
for tx in self.transactions_to_send.iter() { if transactions.is_empty() {
packet.append_raw(tx, 1); return 0;
}
let mut packet = RlpStream::new_list(transactions.len());
let tx_count = transactions.len();
for tx in transactions.drain(..) {
packet.append(&tx);
} }
self.transactions_to_send.clear();
let rlp = packet.out(); let rlp = packet.out();
let lucky_peers = { let lucky_peers = {
@ -1312,20 +1307,19 @@ impl ChainSync {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// taking at max of MAX_PEERS_PROPAGATION // taking at max of MAX_PEERS_PROPAGATION
lucky_peers.iter().map(|&id| id.clone()).take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::<Vec<PeerId>>() lucky_peers.iter().cloned().take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::<Vec<PeerId>>()
}; };
let sent = lucky_peers.len(); let sent = lucky_peers.len();
for peer_id in lucky_peers { for peer_id in lucky_peers {
self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp.clone()); self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp.clone());
} }
trace!(target: "sync", "Sent {} transactions to {} peers.", tx_count, sent);
sent sent
} }
fn propagate_latest_blocks(&mut self, io: &mut SyncIo) { fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
if !self.transactions_to_send.is_empty() {
self.propagate_new_transactions(io); self.propagate_new_transactions(io);
}
let chain_info = io.chain().chain_info(); let chain_info = io.chain().chain_info();
if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
let blocks = self.propagate_blocks(&chain_info, io); let blocks = self.propagate_blocks(&chain_info, io);
@ -1701,8 +1695,8 @@ mod tests {
let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let retracted_blocks = vec![client.block_hash_delta_minus(1)];
// Add some balance to clients // Add some balance to clients
for h in vec![good_blocks[0], retracted_blocks[0]] { for h in &[good_blocks[0], retracted_blocks[0]] {
let block = client.block(BlockId::Hash(h)).unwrap(); let block = client.block(BlockId::Hash(*h)).unwrap();
let view = BlockView::new(&block); let view = BlockView::new(&block);
client.set_balance(view.transactions()[0].sender().unwrap(), U256::from(1_000_000_000)); client.set_balance(view.transactions()[0].sender().unwrap(), U256::from(1_000_000_000));
} }


@ -66,7 +66,7 @@ use std::ops::*;
use std::sync::*; use std::sync::*;
use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId};
use util::TimerToken; use util::TimerToken;
use util::{U256, Bytes, ONE_U256}; use util::{U256, ONE_U256};
use ethcore::client::Client; use ethcore::client::Client;
use ethcore::service::SyncMessage; use ethcore::service::SyncMessage;
use ethminer::Miner; use ethminer::Miner;
@ -101,9 +101,6 @@ impl Default for SyncConfig {
pub trait SyncProvider: Send + Sync { pub trait SyncProvider: Send + Sync {
/// Get sync status /// Get sync status
fn status(&self) -> SyncStatus; fn status(&self) -> SyncStatus;
/// Note that a user has submitted a new transaction.
fn new_transaction(&self, raw_transaction: Bytes);
} }
/// Ethereum network protocol handler /// Ethereum network protocol handler
@ -143,11 +140,6 @@ impl SyncProvider for EthSync {
fn status(&self) -> SyncStatus { fn status(&self) -> SyncStatus {
self.sync.read().unwrap().status() self.sync.read().unwrap().status()
} }
/// Note that a user has submitted a new transaction.
fn new_transaction(&self, raw_transaction: Bytes) {
self.sync.write().unwrap().new_transaction(raw_transaction);
}
} }
impl NetworkProtocolHandler<SyncMessage> for EthSync { impl NetworkProtocolHandler<SyncMessage> for EthSync {


@ -7,6 +7,7 @@ cargo test --features ethcore/json-tests $1 \
-p ethcore \ -p ethcore \
-p ethsync \ -p ethsync \
-p ethcore-rpc \ -p ethcore-rpc \
-p ethcore-webapp \
-p parity \ -p parity \
-p ethminer \ -p ethminer \
-p bigint -p bigint


@ -27,7 +27,7 @@ crossbeam = "0.2"
slab = "0.1" slab = "0.1"
sha3 = { path = "sha3" } sha3 = { path = "sha3" }
serde = "0.7.0" serde = "0.7.0"
clippy = { version = "0.0.54", optional = true } clippy = { version = "0.0.61", optional = true}
json-tests = { path = "json-tests" } json-tests = { path = "json-tests" }
igd = "0.4.2" igd = "0.4.2"
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }


@ -157,6 +157,7 @@ impl KeyPair {
} }
/// EC functions /// EC functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ec { pub mod ec {
use numbers::*; use numbers::*;
use standard::*; use standard::*;
@ -193,6 +194,7 @@ pub mod ec {
} }
Ok(signature) Ok(signature)
} }
/// Verify signature. /// Verify signature.
pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> { pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> {
use secp256k1::*; use secp256k1::*;
@ -233,6 +235,7 @@ pub mod ec {
} }
/// ECDH functions /// ECDH functions
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecdh { pub mod ecdh {
use crypto::*; use crypto::*;
use crypto::{self}; use crypto::{self};
@ -254,6 +257,7 @@ pub mod ecdh {
} }
/// ECIES function /// ECIES function
#[cfg_attr(feature="dev", allow(similar_names))]
pub mod ecies { pub mod ecies {
use hash::*; use hash::*;
use bytes::*; use bytes::*;


@ -392,7 +392,7 @@ macro_rules! impl_hash {
} }
} }
/// BitOr on references /// `BitOr` on references
impl<'a> BitOr for &'a $from { impl<'a> BitOr for &'a $from {
type Output = $from; type Output = $from;
@ -408,7 +408,7 @@ macro_rules! impl_hash {
} }
} }
/// Moving BitOr /// Moving `BitOr`
impl BitOr for $from { impl BitOr for $from {
type Output = $from; type Output = $from;
@ -417,7 +417,7 @@ macro_rules! impl_hash {
} }
} }
/// BitAnd on references /// `BitAnd` on references
impl <'a> BitAnd for &'a $from { impl <'a> BitAnd for &'a $from {
type Output = $from; type Output = $from;
@ -433,7 +433,7 @@ macro_rules! impl_hash {
} }
} }
/// Moving BitAnd /// Moving `BitAnd`
impl BitAnd for $from { impl BitAnd for $from {
type Output = $from; type Output = $from;
@ -442,7 +442,7 @@ macro_rules! impl_hash {
} }
} }
/// BitXor on references /// `BitXor` on references
impl <'a> BitXor for &'a $from { impl <'a> BitXor for &'a $from {
type Output = $from; type Output = $from;
@ -458,7 +458,7 @@ macro_rules! impl_hash {
} }
} }
/// Moving BitXor /// Moving `BitXor`
impl BitXor for $from { impl BitXor for $from {
type Output = $from; type Output = $from;


@ -376,8 +376,10 @@ impl<Message> IoService<Message> where Message: Send + Sync + Clone + 'static {
impl<Message> Drop for IoService<Message> where Message: Send + Sync + Clone { impl<Message> Drop for IoService<Message> where Message: Send + Sync + Clone {
fn drop(&mut self) { fn drop(&mut self) {
trace!(target: "shutdown", "[IoService] Closing...");
self.host_channel.send(IoMessage::Shutdown).unwrap(); self.host_channel.send(IoMessage::Shutdown).unwrap();
self.thread.take().unwrap().join().ok(); self.thread.take().unwrap().join().ok();
trace!(target: "shutdown", "[IoService] Closed.");
} }
} }


@ -120,10 +120,12 @@ impl Worker {
impl Drop for Worker { impl Drop for Worker {
fn drop(&mut self) { fn drop(&mut self) {
trace!(target: "shutdown", "[IoWorker] Closing...");
let _ = self.wait_mutex.lock(); let _ = self.wait_mutex.lock();
self.deleting.store(true, AtomicOrdering::Release); self.deleting.store(true, AtomicOrdering::Release);
self.wait.notify_all(); self.wait.notify_all();
let thread = mem::replace(&mut self.thread, None).unwrap(); let thread = mem::replace(&mut self.thread, None).unwrap();
thread.join().ok(); thread.join().ok();
trace!(target: "shutdown", "[IoWorker] Closed");
} }
} }


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed HashDB implementation. //! Disk-backed `HashDB` implementation.
use common::*; use common::*;
use rlp::*; use rlp::*;
@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)] #[cfg(test)]
use std::env; use std::env;
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics. /// and latent-removal semantics.
/// ///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect. /// the removals actually take effect.
pub struct ArchiveDB { pub struct ArchiveDB {
@ -176,6 +176,7 @@ impl JournalDB for ArchiveDB {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#![cfg_attr(feature="dev", allow(blacklisted_name))] #![cfg_attr(feature="dev", allow(blacklisted_name))]
#![cfg_attr(feature="dev", allow(similar_names))]
use common::*; use common::*;
use super::*; use super::*;


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed HashDB implementation. //! Disk-backed `HashDB` implementation.
use common::*; use common::*;
use rlp::*; use rlp::*;
@ -53,11 +53,11 @@ enum RemoveFrom {
Archive, Archive,
} }
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics. /// and latent-removal semantics.
/// ///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect. /// the removals actually take effect.
pub struct EarlyMergeDB { pub struct EarlyMergeDB {
@ -528,6 +528,7 @@ impl JournalDB for EarlyMergeDB {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#![cfg_attr(feature="dev", allow(blacklisted_name))] #![cfg_attr(feature="dev", allow(blacklisted_name))]
#![cfg_attr(feature="dev", allow(similar_names))]
use common::*; use common::*;
use super::*; use super::*;


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! JournalDB interface and implementation. //! `JournalDB` interface and implementation.
use common::*; use common::*;
@ -25,7 +25,7 @@ mod earlymergedb;
mod overlayrecentdb; mod overlayrecentdb;
mod refcounteddb; mod refcounteddb;
/// Export the JournalDB trait. /// Export the `JournalDB` trait.
pub use self::traits::JournalDB; pub use self::traits::JournalDB;
/// A journal database algorithm. /// A journal database algorithm.
@ -70,7 +70,7 @@ impl fmt::Display for Algorithm {
} }
} }
/// Create a new JournalDB trait object. /// Create a new `JournalDB` trait object.
pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> { pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
match algorithm { match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! JournalDB over in-memory overlay
+//! `JournalDB` over in-memory overlay
use common::*;
use rlp::*;
@@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
use std::env;
use super::JournalDB;
-/// Implementation of the JournalDB trait for a disk-backed database with a memory overlay
+/// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay
/// and, possibly, latent-removal semantics.
///
-/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
-/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
///
@@ -359,6 +359,7 @@ impl HashDB for OverlayRecentDB {
#[cfg(test)]
mod tests {
#![cfg_attr(feature="dev", allow(blacklisted_name))]
+#![cfg_attr(feature="dev", allow(similar_names))]
use common::*;
use super::*;


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Disk-backed, ref-counted JournalDB implementation.
+//! Disk-backed, ref-counted `JournalDB` implementation.
use common::*;
use rlp::*;
@@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)]
use std::env;
-/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
+/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
-/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
-/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
pub struct RefCountedDB {
@@ -195,6 +195,7 @@ impl JournalDB for RefCountedDB {
#[cfg(test)]
mod tests {
#![cfg_attr(feature="dev", allow(blacklisted_name))]
+#![cfg_attr(feature="dev", allow(similar_names))]
use common::*;
use super::*;


@@ -14,12 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Disk-backed HashDB implementation.
+//! Disk-backed `HashDB` implementation.
use common::*;
use hashdb::*;
-/// A HashDB which can manage a short-term journal potentially containing many forks of mutually
+/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
/// exclusive actions.
pub trait JournalDB : HashDB + Send + Sync {
/// Return a copy of ourself, in a box.


@@ -326,7 +326,7 @@ fn uuid_from_string(s: &str) -> Result<Uuid, UtilError> {
#[derive(Clone)]
-/// Stored key file struct with encrypted message (cipher_text)
+/// Stored key file struct with encrypted message (`cipher_text`)
/// also contains password derivation function settings (PBKDF2/Scrypt)
pub struct KeyFileContent {
version: KeyFileVersion,
@@ -369,9 +369,9 @@ enum KeyFileParseError {
}
impl KeyFileContent {
-/// New stored key file struct with encrypted message (cipher_text)
+/// New stored key file struct with encrypted message (`cipher_text`)
/// also contains password derivation function settings (PBKDF2/Scrypt)
-/// to decrypt cipher_text given the password is provided.
+/// to decrypt `cipher_text` given the password is provided.
pub fn new(crypto: KeyFileCrypto) -> KeyFileContent {
KeyFileContent {
id: new_uuid(),


@@ -128,7 +128,7 @@ impl Default for AccountService {
}
impl AccountService {
-/// New account service with the default location
+/// New account service with the keys store in default location
pub fn new() -> Self {
let secret_store = RwLock::new(SecretStore::new());
secret_store.write().unwrap().try_import_existing();
@@ -137,6 +137,15 @@ impl AccountService {
}
}
+/// New account service with the keys store in specific location
+pub fn new_in(path: &Path) -> Self {
+let secret_store = RwLock::new(SecretStore::new_in(path));
+secret_store.write().unwrap().try_import_existing();
+AccountService {
+secret_store: secret_store
+}
+}
#[cfg(test)]
fn new_test(temp: &::devtools::RandomTempPath) -> Self {
let secret_store = RwLock::new(SecretStore::new_test(temp));
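For context, a small sketch of how the two constructors might be called (the key directory is illustrative, not taken from the diff):

    use std::path::Path;

    fn open_account_services() -> (AccountService, AccountService) {
        // Keys stored in the default location.
        let default_service = AccountService::new();
        // Keys stored in a caller-chosen directory.
        let custom_service = AccountService::new_in(Path::new("/tmp/parity-keys"));
        (default_service, custom_service)
    }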


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Key-Value store abstraction with RocksDB backend.
+//! Key-Value store abstraction with `RocksDB` backend.
use std::default::Default;
use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator,


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Reference-counted memory-based HashDB implementation.
+//! Reference-counted memory-based `HashDB` implementation.
use hash::*;
use bytes::*;
@@ -27,7 +27,7 @@ use std::collections::HashMap;
use std::default::Default;
#[derive(Debug,Clone)]
-/// Reference-counted memory-based HashDB implementation.
+/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `containce()` and lookup a hash to derive


@@ -223,7 +223,7 @@ pub enum WriteStatus {
Complete
}
-/// RLPx packet
+/// `RLPx` packet
pub struct Packet {
pub protocol: u16,
pub data: Bytes,
@@ -237,7 +237,7 @@ enum EncryptedConnectionState {
Payload,
}
-/// Connection implementing RLPx framing
+/// Connection implementing `RLPx` framing
/// https://github.com/ethereum/devp2p/blob/master/rlpx.md#framing
pub struct EncryptedConnection {
/// Underlying tcp connection


@@ -48,7 +48,7 @@ enum HandshakeState {
StartSession,
}
-/// RLPx protocol handhake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
+/// `RLPx` protocol handhake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
pub struct Handshake {
/// Remote node public key
pub id: NodeId,
@@ -66,7 +66,7 @@ pub struct Handshake {
pub remote_ephemeral: Public,
/// Remote connection nonce.
pub remote_nonce: H256,
-/// Remote RLPx protocol version.
+/// Remote `RLPx` protocol version.
pub remote_version: u64,
/// A copy of received encryped auth packet
pub auth_cipher: Bytes,


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-//! Disk-backed HashDB implementation.
+//! Disk-backed `HashDB` implementation.
use error::*;
use hash::*;
@@ -28,7 +28,7 @@ use std::env;
use std::collections::HashMap;
use kvdb::{Database, DBTransaction};
-/// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
+/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay.
///
/// The operations `insert()` and `remove()` take place on the memory overlay; batches of
/// such operations may be flushed to the disk-backed DB with `commit()` or discarded with


@@ -153,7 +153,7 @@ impl <T>ToBytes for T where T: FixedHash {
fn to_bytes_len(&self) -> usize { self.bytes().len() }
}
-/// Error returned when FromBytes conversation goes wrong
+/// Error returned when `FromBytes` conversation goes wrong
#[derive(Debug, PartialEq, Eq)]
pub enum FromBytesError {
/// Expected more RLP data
@@ -174,7 +174,7 @@ impl fmt::Display for FromBytesError {
}
}
-/// Alias for the result of FromBytes trait
+/// Alias for the result of `FromBytes` trait
pub type FromBytesResult<T> = Result<T, FromBytesError>;
/// Converts to given type from its bytes representation


@@ -23,7 +23,7 @@ use super::trietraits::*;
/// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
///
-/// Use it as a `Trie` trait object. You can use `raw()` to get the backing TrieDB object.
+/// Use it as a `Trie` trait object. You can use `raw()` to get the backing `TrieDB` object.
pub struct SecTrieDB<'db> {
raw: TrieDB<'db>
}
@@ -36,12 +36,12 @@ impl<'db> SecTrieDB<'db> {
SecTrieDB { raw: TrieDB::new(db, root) }
}
-/// Get a reference to the underlying raw TrieDB struct.
+/// Get a reference to the underlying raw `TrieDB` struct.
pub fn raw(&self) -> &TrieDB {
&self.raw
}
-/// Get a mutable reference to the underlying raw TrieDB struct.
+/// Get a mutable reference to the underlying raw `TrieDB` struct.
pub fn raw_mut(&mut self) -> &TrieDB {
&mut self.raw
}


@@ -23,7 +23,7 @@ use super::trietraits::*;
/// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
///
-/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing TrieDBMut object.
+/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing `TrieDBMut` object.
pub struct SecTrieDBMut<'db> {
raw: TrieDBMut<'db>
}

webapp/Cargo.toml (new file)

@@ -0,0 +1,26 @@
[package]
description = "Parity WebApplications crate"
name = "ethcore-webapp"
version = "1.1.0"
license = "GPL-3.0"
authors = ["Ethcore <admin@ethcore.io"]
[lib]
[dependencies]
log = "0.3"
jsonrpc-core = "2.0"
jsonrpc-http-server = { git = "https://github.com/tomusdrw/jsonrpc-http-server.git", branch="old-hyper" }
hyper = { version = "0.8", default-features = false }
iron = { version = "0.3" }
ethcore-rpc = { path = "../rpc" }
ethcore-util = { path = "../util" }
parity-webapp = { git = "https://github.com/tomusdrw/parity-webapp.git" }
# List of apps
parity-status = { git = "https://github.com/tomusdrw/parity-status.git", version = "0.1.4" }
parity-wallet = { git = "https://github.com/tomusdrw/parity-wallet.git", optional = true }
clippy = { version = "0.0.61", optional = true}
[features]
default = ["parity-wallet"]
dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"]

webapp/src/apps.rs (new file)

@@ -0,0 +1,41 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashMap;
use page::{Page, PageHandler};
extern crate parity_status;
extern crate parity_wallet;
pub type Pages = HashMap<String, Box<Page>>;
pub fn main_page() -> Box<Page> {
Box::new(PageHandler { app: parity_status::App::default() })
}
pub fn all_pages() -> Pages {
let mut pages = Pages::new();
wallet_page(&mut pages);
pages
}
#[cfg(feature = "parity-wallet")]
fn wallet_page(pages: &mut Pages) {
pages.insert("wallet".to_owned(), Box::new(PageHandler { app: parity_wallet::App::default() }));
}
#[cfg(not(feature = "parity-wallet"))]
fn wallet_page(_pages: &mut Pages) {}
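The same feature-gated pattern extends naturally to further bundled apps. A purely hypothetical sketch (the `parity-home` crate, feature name and page key are invented for illustration), which would also need a matching call from `all_pages()`:

    #[cfg(feature = "parity-home")]
    fn home_page(pages: &mut Pages) {
        // Register the hypothetical app under the "home" URL segment.
        pages.insert("home".to_owned(), Box::new(PageHandler { app: parity_home::App::default() }));
    }

    #[cfg(not(feature = "parity-home"))]
    fn home_page(_pages: &mut Pages) {}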

webapp/src/lib.rs (new file)

@@ -0,0 +1,99 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore Webapplications for Parity
#![warn(missing_docs)]
#![cfg_attr(feature="nightly", plugin(clippy))]
#[macro_use]
extern crate log;
extern crate hyper;
extern crate iron;
extern crate jsonrpc_core;
extern crate jsonrpc_http_server;
extern crate ethcore_rpc as rpc;
extern crate parity_webapp;
use std::sync::Arc;
use self::jsonrpc_core::{IoHandler, IoDelegate};
use jsonrpc_http_server::ServerHandler;
mod apps;
mod page;
mod router;
/// Http server.
pub struct WebappServer {
handler: Arc<IoHandler>,
}
impl WebappServer {
/// Construct new http server object
pub fn new() -> Self {
WebappServer {
handler: Arc::new(IoHandler::new()),
}
}
/// Add io delegate.
pub fn add_delegate<D>(&self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
self.handler.add_delegate(delegate);
}
/// Start server asynchronously and returns result with `Listening` handle on success or an error.
pub fn start_http(&self, addr: &str, threads: usize) -> Result<Listening, WebappServerError> {
let addr = addr.to_owned();
let handler = self.handler.clone();
let cors_domain = jsonrpc_http_server::AccessControlAllowOrigin::Null;
let rpc = ServerHandler::new(handler, cors_domain);
let router = router::Router::new(rpc, apps::main_page(), apps::all_pages());
try!(hyper::Server::http(addr.as_ref() as &str))
.handle_threads(router, threads)
.map(|l| Listening { listening: l })
.map_err(WebappServerError::from)
}
}
/// Listening handle
pub struct Listening {
listening: hyper::server::Listening
}
impl Drop for Listening {
fn drop(&mut self) {
self.listening.close().unwrap();
}
}
/// Webapp Server startup error
#[derive(Debug)]
pub enum WebappServerError {
/// Wrapped `std::io::Error`
IoError(std::io::Error),
/// Other `hyper` error
Other(hyper::error::Error),
}
impl From<hyper::error::Error> for WebappServerError {
fn from(err: hyper::error::Error) -> Self {
match err {
hyper::error::Error::Io(e) => WebappServerError::IoError(e),
e => WebappServerError::Other(e),
}
}
}
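A minimal start-up sketch using only the API shown above (the address and thread count are arbitrary, and RPC delegates are omitted):

    fn run_webapp() -> Result<Listening, WebappServerError> {
        let server = WebappServer::new();
        // Delegates exposing RPC methods would be registered here via `add_delegate(...)`.
        // The returned `Listening` handle must be kept alive; dropping it closes the server.
        server.start_http("127.0.0.1:8080", 4)
    }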

webapp/src/page/mod.rs (new file)

@@ -0,0 +1,67 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::io::Write;
use hyper::uri::RequestUri;
use hyper::server;
use hyper::header;
use hyper::status::StatusCode;
use parity_webapp::WebApp;
pub trait Page : Send + Sync {
fn serve_file(&self, mut path: &str, mut res: server::Response);
}
pub struct PageHandler<T : WebApp> {
pub app: T,
}
impl<T: WebApp> Page for PageHandler<T> {
fn serve_file(&self, mut path: &str, mut res: server::Response) {
// Support index file
if path == "" {
path = "index.html"
}
let file = self.app.file(path);
if let Some(f) = file {
*res.status_mut() = StatusCode::Ok;
res.headers_mut().set(header::ContentType(f.content_type.parse().unwrap()));
let _ = match res.start() {
Ok(mut raw_res) => {
for chunk in f.content.chunks(1024 * 20) {
let _ = raw_res.write(chunk);
}
raw_res.end()
},
Err(_) => {
println!("Error while writing response.");
Ok(())
},
};
}
}
}
impl server::Handler for Page {
fn handle(&self, req: server::Request, mut res: server::Response) {
*res.status_mut() = StatusCode::NotFound;
if let RequestUri::AbsolutePath(ref path) = req.uri {
self.serve_file(path, res);
}
}
}
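In short: when a request reaches a page with an empty remaining path, `serve_file` falls back to index.html; paths found in the `WebApp` bundle are streamed out in 20 kB chunks with the bundle's content type, while unknown paths leave the handler's pre-set 404 status untouched.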

webapp/src/router/api.rs (new file)

@@ -0,0 +1,53 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Simple REST API
use std::sync::Arc;
use hyper;
use hyper::status::StatusCode;
use hyper::header;
use hyper::uri::RequestUri::AbsolutePath as Path;
use apps::Pages;
pub struct RestApi {
pub pages: Arc<Pages>,
}
impl RestApi {
fn list_pages(&self) -> String {
let mut s = "[".to_owned();
for name in self.pages.keys() {
s.push_str(&format!("\"{}\",", name));
}
s.push_str("\"rpc\"");
s.push_str("]");
s
}
}
impl hyper::server::Handler for RestApi {
fn handle<'b, 'a>(&'a self, req: hyper::server::Request<'a, 'b>, mut res: hyper::server::Response<'a>) {
match req.uri {
Path(ref path) if path == "apps" => {
*res.status_mut() = StatusCode::Ok;
res.headers_mut().set(header::ContentType("application/json".parse().unwrap()));
let _ = res.send(self.list_pages().as_bytes());
},
_ => (),
}
}
}
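For illustration, the list built by `list_pages` always ends with the implicit "rpc" entry; the free function below simply mirrors the loop above (it is not part of the crate) to show the resulting string for a single registered page named "wallet":

    fn list_pages_for(names: &[&str]) -> String {
        let mut s = "[".to_owned();
        for name in names {
            s.push_str(&format!("\"{}\",", name));
        }
        s.push_str("\"rpc\"");
        s.push_str("]");
        s
    }

    // list_pages_for(&["wallet"]) == "[\"wallet\",\"rpc\"]"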

webapp/src/router/mod.rs (new file)

@@ -0,0 +1,109 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Router implementation
use std::sync::Arc;
use hyper;
use page::Page;
use apps::Pages;
use iron::request::Url;
use jsonrpc_http_server::ServerHandler;
mod api;
pub struct Router {
rpc: ServerHandler,
api: api::RestApi,
main_page: Box<Page>,
pages: Arc<Pages>,
}
impl hyper::server::Handler for Router {
fn handle<'b, 'a>(&'a self, req: hyper::server::Request<'a, 'b>, res: hyper::server::Response<'a>) {
let (path, req) = Router::extract_request_path(req);
match path {
Some(ref url) if self.pages.contains_key(url) => {
self.pages.get(url).unwrap().handle(req, res);
},
Some(ref url) if url == "api" => {
self.api.handle(req, res);
},
_ if req.method == hyper::method::Method::Post => {
self.rpc.handle(req, res)
},
_ => self.main_page.handle(req, res),
}
}
}
impl Router {
pub fn new(rpc: ServerHandler, main_page: Box<Page>, pages: Pages) -> Self {
let pages = Arc::new(pages);
Router {
rpc: rpc,
api: api::RestApi { pages: pages.clone() },
main_page: main_page,
pages: pages,
}
}
fn extract_url(req: &hyper::server::Request) -> Option<Url> {
match req.uri {
hyper::uri::RequestUri::AbsoluteUri(ref url) => {
match Url::from_generic_url(url.clone()) {
Ok(url) => Some(url),
_ => None,
}
},
hyper::uri::RequestUri::AbsolutePath(ref path) => {
// Attempt to prepend the Host header (mandatory in HTTP/1.1)
let url_string = match req.headers.get::<hyper::header::Host>() {
Some(ref host) => {
format!("http://{}:{}{}", host.hostname, host.port.unwrap_or(80), path)
},
None => return None,
};
match Url::parse(&url_string) {
Ok(url) => Some(url),
_ => None,
}
},
_ => None,
}
}
fn extract_request_path<'a, 'b>(mut req: hyper::server::Request<'a, 'b>) -> (Option<String>, hyper::server::Request<'a, 'b>) {
let url = Router::extract_url(&req);
match url {
Some(ref url) if url.path.len() > 1 => {
let part = url.path[0].clone();
let url = url.path[1..].join("/");
req.uri = hyper::uri::RequestUri::AbsolutePath(url);
(Some(part), req)
},
Some(url) => {
let url = url.path.join("/");
req.uri = hyper::uri::RequestUri::AbsolutePath(url);
(None, req)
},
_ => {
(None, req)
},
}
}
}
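Putting `handle` and `extract_request_path` together (and assuming a Host header of example.com), the dispatch works out as follows: GET /wallet/index.html strips the leading "wallet" segment, rewrites the request URI to index.html and serves it from the registered wallet page; GET /api/apps is rewritten to apps and answered by the RestApi; a POST whose first path segment matches no registered page is forwarded to the JSON-RPC `ServerHandler`; anything else falls through to the main (status) page.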