Merge branch 'master' into h256
Conflicts: ethcore/src/account.rs
This commit is contained in:
commit 04d5b5cbe6

190  Cargo.lock  generated
@ -2,7 +2,7 @@
|
||||
name = "parity"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)",
|
||||
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -11,6 +11,7 @@ dependencies = [
|
||||
"ethcore-devtools 1.1.0",
|
||||
"ethcore-rpc 1.1.0",
|
||||
"ethcore-util 1.1.0",
|
||||
"ethcore-webapp 1.1.0",
|
||||
"ethminer 1.1.0",
|
||||
"ethsync 1.1.0",
|
||||
"fdlimit 0.1.0",
|
||||
@ -97,15 +98,24 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clippy"
|
||||
version = "0.0.54"
|
||||
version = "0.0.61"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "conduit-mime-types"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cookie"
|
||||
version = "0.1.21"
|
||||
@ -184,6 +194,15 @@ dependencies = [
|
||||
"regex 0.1.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "error"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "eth-secp256k1"
|
||||
version = "0.5.4"
|
||||
@ -211,7 +230,7 @@ dependencies = [
|
||||
name = "ethcore"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethash 1.1.0",
|
||||
@ -238,14 +257,14 @@ dependencies = [
|
||||
name = "ethcore-rpc"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethash 1.1.0",
|
||||
"ethcore 1.1.0",
|
||||
"ethcore-util 1.1.0",
|
||||
"ethminer 1.1.0",
|
||||
"ethsync 1.1.0",
|
||||
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-http-server 3.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-http-server 5.0.0 (git+https://github.com/debris/jsonrpc-http-server.git)",
|
||||
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -262,7 +281,7 @@ dependencies = [
|
||||
"arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"bigint 0.1.0",
|
||||
"chrono 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@ -290,6 +309,23 @@ dependencies = [
|
||||
"vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethcore-webapp"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore-rpc 1.1.0",
|
||||
"ethcore-util 1.1.0",
|
||||
"hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"iron 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-http-server 4.0.0 (git+https://github.com/tomusdrw/jsonrpc-http-server.git?branch=old-hyper)",
|
||||
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"parity-status 0.1.4 (git+https://github.com/tomusdrw/parity-status.git)",
|
||||
"parity-wallet 0.1.0 (git+https://github.com/tomusdrw/parity-wallet.git)",
|
||||
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ethjson"
|
||||
version = "0.1.0"
|
||||
@ -306,7 +342,7 @@ dependencies = [
|
||||
name = "ethminer"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore 1.1.0",
|
||||
"ethcore-util 1.1.0",
|
||||
@ -320,7 +356,7 @@ dependencies = [
|
||||
name = "ethsync"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"clippy 0.0.54 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"clippy 0.0.61 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ethcore 1.1.0",
|
||||
"ethcore-util 1.1.0",
|
||||
@ -414,6 +450,27 @@ dependencies = [
|
||||
"url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "0.9.0-mio"
|
||||
source = "git+https://github.com/hyperium/hyper?branch=mio#d55a70dc56dac1f0f03bc4c3a83db0314d48e69a"
|
||||
dependencies = [
|
||||
"cookie 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"mime 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rotor 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "igd"
|
||||
version = "0.4.2"
|
||||
@ -426,6 +483,23 @@ dependencies = [
|
||||
"xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "iron"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"conduit-mime-types 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"error 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"modifier 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"plugin 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itertools"
|
||||
version = "0.4.11"
|
||||
@ -452,14 +526,24 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "jsonrpc-http-server"
|
||||
version = "3.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
version = "4.0.0"
|
||||
source = "git+https://github.com/tomusdrw/jsonrpc-http-server.git?branch=old-hyper#46bd4e7cf8352e0efc940cf76d3dff99f1a3da15"
|
||||
dependencies = [
|
||||
"hyper 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jsonrpc-http-server"
|
||||
version = "5.0.0"
|
||||
source = "git+https://github.com/debris/jsonrpc-http-server.git#76fa443982b40665721fe6b1ece42fc0a53be996"
|
||||
dependencies = [
|
||||
"hyper 0.9.0-mio (git+https://github.com/hyperium/hyper?branch=mio)",
|
||||
"jsonrpc-core 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kernel32-sys"
|
||||
version = "0.2.1"
|
||||
@ -572,6 +656,11 @@ dependencies = [
|
||||
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "modifier"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "net2"
|
||||
version = "0.2.23"
|
||||
@ -636,6 +725,35 @@ name = "odds"
|
||||
version = "0.2.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "parity-status"
|
||||
version = "0.1.4"
|
||||
source = "git+https://github.com/tomusdrw/parity-status.git#380d13c8aafc3847a731968a6532edb09c78f2cf"
|
||||
dependencies = [
|
||||
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parity-wallet"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/tomusdrw/parity-wallet.git#9b0253f5cb88b31417450ca8be708cab2e437dfc"
|
||||
dependencies = [
|
||||
"parity-webapp 0.1.0 (git+https://github.com/tomusdrw/parity-webapp.git)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parity-webapp"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/tomusdrw/parity-webapp.git#a24297256bae0ae0712c6478cd1ad681828b3800"
|
||||
|
||||
[[package]]
|
||||
name = "plugin"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"typemap 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "primal"
|
||||
version = "0.2.3"
|
||||
@ -695,6 +813,16 @@ dependencies = [
|
||||
"syntex_syntax 0.30.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quick-error"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "quine-mc_cluskey"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.3.14"
|
||||
@ -739,6 +867,18 @@ dependencies = [
|
||||
"librocksdb-sys 0.2.3 (git+https://github.com/arkpar/rust-rocksdb.git)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rotor"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"quick-error 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"void 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rpassword"
|
||||
version = "0.1.3"
|
||||
@ -932,6 +1072,14 @@ name = "typeable"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "typemap"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"unsafe-any 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicase"
|
||||
version = "1.4.0"
|
||||
@ -958,6 +1106,14 @@ name = "unicode-xid"
|
||||
version = "0.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "unsafe-any"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "url"
|
||||
version = "0.2.38"
|
||||
@ -994,6 +1150,15 @@ dependencies = [
|
||||
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "vecio"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
dependencies = [
|
||||
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "vergen"
|
||||
version = "0.1.0"
|
||||
@ -1003,6 +1168,11 @@ dependencies = [
|
||||
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "void"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.2.6"
|
||||
|
10  Cargo.toml

@@ -21,22 +21,26 @@ daemonize = "0.2"
num_cpus = "0.2"
number_prefix = "0.2"
rpassword = "0.1"
clippy = { version = "0.0.54", optional = true }
clippy = { version = "0.0.61", optional = true}
ethcore = { path = "ethcore" }
ethcore-util = { path = "util" }
ethsync = { path = "sync" }
ethminer = { path = "miner" }
ethcore-devtools = { path = "devtools" }
ethcore-rpc = { path = "rpc", optional = true }
ethcore-webapp = { path = "webapp", optional = true }

[dependencies.hyper]
version = "0.8"
default-features = false

[features]
default = ["rpc"]
default = ["rpc", "webapp"]
rpc = ["ethcore-rpc"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"]
webapp = ["ethcore-webapp"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev",
"ethcore-webapp/dev"]
travis-beta = ["ethcore/json-tests"]
travis-nightly = ["ethcore/json-tests", "dev"]
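
The `[features]` change above makes the new `webapp` feature part of the default build and threads `ethcore-webapp/dev` through the `dev` feature. As a rough, self-contained illustration (the function names below are hypothetical, not taken from parity's `main.rs`), an optional dependency gated like this is typically consumed behind a `cfg` check:

```rust
// Illustrative only: how an optional, feature-gated crate is usually wired in.
// The module and function names here are hypothetical, not parity's real code.

#[cfg(feature = "webapp")]
fn start_webapp() {
    // Only compiled when the "webapp" feature (and thus ethcore-webapp) is enabled.
    println!("starting web apps server");
}

#[cfg(not(feature = "webapp"))]
fn start_webapp() {
    // No-op build when the feature is disabled.
    println!("webapp support not compiled in");
}

fn main() {
    start_webapp();
}
```
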
2  cov.sh

@@ -23,6 +23,7 @@ cargo test \
-p ethcore-rpc \
-p parity \
-p ethminer \
-p ethcore-webapp \
--no-run || exit $?
rm -rf target/coverage
mkdir -p target/coverage
@@ -33,5 +34,6 @@ kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage t
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethsync-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_webapp-*
kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethminer-*
xdg-open target/coverage/index.html
1  doc.sh

@@ -7,5 +7,6 @@ cargo doc --no-deps --verbose \
-p ethcore \
-p ethsync \
-p ethcore-rpc \
-p ethcore-webapp \
-p parity \
-p ethminer
@@ -17,7 +17,7 @@ ethcore-util = { path = "../util" }
evmjit = { path = "../evmjit", optional = true }
ethash = { path = "../ethash" }
num_cpus = "0.2"
clippy = { version = "0.0.54", optional = true }
clippy = { version = "0.0.61", optional = true}
crossbeam = "0.1.5"
lazy_static = "0.1"
ethcore-devtools = { path = "../devtools" }
@@ -154,7 +154,7 @@ impl ExecutedBlock {
	}
}

/// Trait for a object that is_a `ExecutedBlock`.
/// Trait for a object that is a `ExecutedBlock`.
pub trait IsBlock {
	/// Get the block associated with this object.
	fn block(&self) -> &ExecutedBlock;
@@ -192,7 +192,7 @@ pub struct OpenBlock<'x> {
	last_hashes: LastHashes,
}

/// Just like OpenBlock, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
/// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
/// and collected the uncles.
///
/// There is no function available to push a transaction.
@@ -204,7 +204,7 @@ pub struct ClosedBlock {
	unclosed_state: State,
}

/// Just like ClosedBlock except that we can't reopen it and it's faster.
/// Just like `ClosedBlock` except that we can't reopen it and it's faster.
///
/// We actually store the post-`Engine::on_close_block` state, unlike in `ClosedBlock` where it's the pre.
#[derive(Clone)]
@@ -216,14 +216,15 @@ pub struct LockedBlock {

/// A block that has a valid seal.
///
/// The block's header has valid seal arguments. The block cannot be reversed into a ClosedBlock or OpenBlock.
/// The block's header has valid seal arguments. The block cannot be reversed into a `ClosedBlock` or `OpenBlock`.
pub struct SealedBlock {
	block: ExecutedBlock,
	uncle_bytes: Bytes,
}

impl<'x> OpenBlock<'x> {
	/// Create a new OpenBlock ready for transaction pushing.
	#[cfg_attr(feature="dev", allow(too_many_arguments))]
	/// Create a new `OpenBlock` ready for transaction pushing.
	pub fn new(engine: &'x Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self {
		let mut r = OpenBlock {
			block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce()), tracing),
@@ -319,7 +320,7 @@ impl<'x> OpenBlock<'x> {
		}
	}

	/// Turn this into a `ClosedBlock`. A BlockChain must be provided in order to figure out the uncles.
	/// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles.
	pub fn close(self) -> ClosedBlock {
		let mut s = self;

@@ -454,6 +455,7 @@ impl IsBlock for SealedBlock {
}

/// Enact the block given by block header, transactions and uncles
#[cfg_attr(feature="dev", allow(too_many_arguments))]
pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, tracing: bool, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<LockedBlock, Error> {
	{
		if ::log::max_log_level() >= ::log::LogLevel::Trace {
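
The doc-comment fixes above describe the block-building pipeline: an `OpenBlock` accepts transactions, `close()` turns it into a `ClosedBlock`, which becomes a `LockedBlock` and finally a `SealedBlock` that cannot be reopened. A minimal sketch of that typestate idea, with placeholder fields rather than ethcore's real types:

```rust
// A minimal, self-contained sketch of the typestate progression the doc comments
// describe (OpenBlock -> ClosedBlock -> LockedBlock -> SealedBlock). Fields and
// methods are simplified placeholders, not ethcore's real API.

struct Open { transactions: Vec<String> }
struct Closed { transactions: Vec<String> }
struct Locked { transactions: Vec<String> }
struct Sealed { transactions: Vec<String>, seal: Vec<u8> }

impl Open {
    fn push_transaction(&mut self, tx: &str) { self.transactions.push(tx.to_string()); }
    // Closing consumes the open block; no further transactions can be pushed.
    fn close(self) -> Closed { Closed { transactions: self.transactions } }
}

impl Closed {
    fn lock(self) -> Locked { Locked { transactions: self.transactions } }
}

impl Locked {
    // Sealing consumes the locked block; a sealed block cannot be reopened.
    fn seal(self, seal: Vec<u8>) -> Sealed { Sealed { transactions: self.transactions, seal } }
}

fn main() {
    let mut open = Open { transactions: vec![] };
    open.push_transaction("tx1");
    let sealed = open.close().lock().seal(vec![0x1b]);
    println!("sealed with {} txs, seal len {}", sealed.transactions.len(), sealed.seal.len());
}
```
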
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! A queue of blocks. Sits between network or other I/O and the BlockChain.
//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
//! Sorts them ready for blockchain insertion.
use std::thread::{JoinHandle, self};
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
@@ -89,7 +89,7 @@ impl BlockQueueInfo {
	}
}

/// A queue of blocks. Sits between network or other I/O and the BlockChain.
/// A queue of blocks. Sits between network or other I/O and the `BlockChain`.
/// Sorts them ready for blockchain insertion.
pub struct BlockQueue {
	panic_handler: Arc<PanicHandler>,
@@ -116,6 +116,7 @@ struct VerifyingBlock {
}

struct QueueSignal {
	deleting: Arc<AtomicBool>,
	signalled: AtomicBool,
	message_channel: IoChannel<NetSyncMessage>,
}
@@ -123,10 +124,16 @@ struct QueueSignal {
impl QueueSignal {
	#[cfg_attr(feature="dev", allow(bool_comparison))]
	fn set(&self) {
		// Do not signal when we are about to close
		if self.deleting.load(AtomicOrdering::Relaxed) {
			return;
		}

		if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
			self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
		}
	}

	fn reset(&self) {
		self.signalled.store(false, AtomicOrdering::Relaxed);
	}
@@ -150,8 +157,12 @@ impl BlockQueue {
			bad: Mutex::new(HashSet::new()),
		});
		let more_to_verify = Arc::new(Condvar::new());
		let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
		let deleting = Arc::new(AtomicBool::new(false));
		let ready_signal = Arc::new(QueueSignal {
			deleting: deleting.clone(),
			signalled: AtomicBool::new(false),
			message_channel: message_channel
		});
		let empty = Arc::new(Condvar::new());
		let panic_handler = PanicHandler::new_in_arc();

@@ -431,12 +442,14 @@ impl MayPanic for BlockQueue {

impl Drop for BlockQueue {
	fn drop(&mut self) {
		trace!(target: "shutdown", "[BlockQueue] Closing...");
		self.clear();
		self.deleting.store(true, AtomicOrdering::Release);
		self.more_to_verify.notify_all();
		for t in self.verifiers.drain(..) {
			t.join().unwrap();
		}
		trace!(target: "shutdown", "[BlockQueue] Closed.");
	}
}
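
The `QueueSignal` change adds a shared `deleting` flag so that `set()` stops sending `BlockVerified` messages once `Drop` has begun tearing the queue down. A self-contained sketch of the same guard, with the message channel replaced by a print:

```rust
// Self-contained sketch of the shutdown guard added to QueueSignal: a shared
// `deleting` flag is checked before signalling, so no new work messages are sent
// once Drop has started. Names mirror the diff; the real IoChannel is omitted.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

struct QueueSignal {
    deleting: Arc<AtomicBool>,
    signalled: AtomicBool,
}

impl QueueSignal {
    fn set(&self) {
        // Do not signal when we are about to close.
        if self.deleting.load(Ordering::Relaxed) {
            return;
        }
        // The original uses compare_and_swap; compare_exchange is the modern equivalent.
        if self
            .signalled
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            println!("BlockVerified message sent");
        }
    }

    fn reset(&self) {
        self.signalled.store(false, Ordering::Relaxed);
    }
}

fn main() {
    let deleting = Arc::new(AtomicBool::new(false));
    let signal = QueueSignal { deleting: deleting.clone(), signalled: AtomicBool::new(false) };
    signal.set();                             // sends
    signal.reset();
    deleting.store(true, Ordering::Release);  // shutdown begins
    signal.set();                             // suppressed
}
```
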
@@ -427,6 +427,7 @@ impl BlockChain {
		}
	}

	#[cfg_attr(feature="dev", allow(similar_names))]
	/// Inserts the block into backing cache database.
	/// Expects the block to be valid and already verified.
	/// If the block is already known, does nothing.
@@ -855,6 +856,7 @@ impl BlockChain {

#[cfg(test)]
mod tests {
	#![cfg_attr(feature="dev", allow(similar_names))]
	use std::str::FromStr;
	use rustc_serialize::hex::FromHex;
	use util::hash::*;
@@ -60,7 +60,7 @@ impl Indexer {
	}

	/// Return bloom which are dependencies for given index.
	///
	///
	/// Bloom indexes are ordered from lowest to highest.
	pub fn lower_level_bloom_indexes(&self, index: &BloomIndex) -> Vec<BloomIndex> {
		// this is the lowest level
@@ -87,6 +87,7 @@ impl Indexer {

#[cfg(test)]
mod tests {
	#![cfg_attr(feature="dev", allow(similar_names))]
	use chainfilter::BloomIndex;
	use chainfilter::indexer::Indexer;
@@ -23,7 +23,7 @@ use chainfilter::{BloomIndex, FilterDataSource, ChainFilter};

/// In memory cache for blooms.
///
/// Stores all blooms in HashMap, which indexes them by `BloomIndex`.
/// Stores all blooms in `HashMap`, which indexes them by `BloomIndex`.
pub struct MemoryCache {
	blooms: HashMap<BloomIndex, H2048>,
}
@@ -38,7 +38,7 @@ use block_queue::{BlockQueue, BlockQueueInfo};
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{BlockId, TransactionId, UncleId, ClientConfig, BlockChainClient};
use env_info::EnvInfo;
use executive::{Executive, Executed, contract_address};
use executive::{Executive, Executed, TransactOptions, contract_address};
use receipt::LocalizedReceipt;
pub use blockchain::CacheSize as BlockChainCacheSize;

@@ -418,7 +418,8 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
		// give the sender max balance
		state.sub_balance(&sender, &balance);
		state.add_balance(&sender, &U256::max_value());
		Executive::new(&mut state, &env_info, self.engine.deref().deref()).transact(t, false)
		let options = TransactOptions { tracing: false, check_nonce: false };
		Executive::new(&mut state, &env_info, self.engine.deref().deref()).transact(t, options)
	}

	// TODO [todr] Should be moved to miner crate eventually.
@@ -44,7 +44,7 @@ pub enum Error {
		/// Invoked instruction
		instruction: &'static str,
		/// How many stack elements was requested by instruction
		wanted: usize,
		wanted: usize,
		/// How many elements were on stack
		on_stack: usize
	},
@@ -64,8 +64,8 @@ pub enum Error {
}

/// Evm result.
///
/// Returns gas_left if execution is successful, otherwise error.
///
/// Returns `gas_left` if execution is successful, otherwise error.
pub type Result = result::Result<U256, Error>;

/// Evm interface.
@@ -36,6 +36,14 @@ pub fn contract_address(address: &Address, nonce: &U256) -> Address {
	From::from(stream.out().sha3())
}

/// Transaction execution options.
pub struct TransactOptions {
	/// Enable call tracing.
	pub tracing: bool,
	/// Check transaction nonce before execution.
	pub check_nonce: bool,
}

/// Transaction execution receipt.
#[derive(Debug, PartialEq, Clone)]
pub struct Executed {
@@ -110,7 +118,7 @@ impl<'a> Executive<'a> {
	}

	/// This funtion should be used to execute transaction.
	pub fn transact(&'a mut self, t: &SignedTransaction, tracing: bool) -> Result<Executed, Error> {
	pub fn transact(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result<Executed, Error> {
		let sender = try!(t.sender());
		let nonce = self.state.nonce(&sender);

@@ -124,8 +132,10 @@ impl<'a> Executive<'a> {
		let init_gas = t.gas - base_gas_required;

		// validate transaction nonce
		if t.nonce != nonce {
			return Err(From::from(ExecutionError::InvalidNonce { expected: nonce, got: t.nonce }));
		if options.check_nonce {
			if t.nonce != nonce {
				return Err(From::from(ExecutionError::InvalidNonce { expected: nonce, got: t.nonce }));
			}
		}

		// validate if transaction fits into given block
@@ -151,7 +161,7 @@ impl<'a> Executive<'a> {
		self.state.inc_nonce(&sender);
		self.state.sub_balance(&sender, &U256::from(gas_cost));

		let mut substate = Substate::new(tracing);
		let mut substate = Substate::new(options.tracing);

		let (gas_left, output) = match t.action {
			Action::Create => {
@@ -881,7 +891,8 @@ mod tests {

		let executed = {
			let mut ex = Executive::new(&mut state, &info, &engine);
			ex.transact(&t, false).unwrap()
			let opts = TransactOptions { check_nonce: true, tracing: false };
			ex.transact(&t, opts).unwrap()
		};

		assert_eq!(executed.gas, U256::from(100_000));
@@ -914,7 +925,8 @@ mod tests {

		let res = {
			let mut ex = Executive::new(&mut state, &info, &engine);
			ex.transact(&t, false)
			let opts = TransactOptions { check_nonce: true, tracing: false };
			ex.transact(&t, opts)
		};

		match res {
@@ -945,7 +957,8 @@ mod tests {

		let res = {
			let mut ex = Executive::new(&mut state, &info, &engine);
			ex.transact(&t, false)
			let opts = TransactOptions { check_nonce: true, tracing: false };
			ex.transact(&t, opts)
		};

		match res {
@@ -978,7 +991,8 @@ mod tests {

		let res = {
			let mut ex = Executive::new(&mut state, &info, &engine);
			ex.transact(&t, false)
			let opts = TransactOptions { check_nonce: true, tracing: false };
			ex.transact(&t, opts)
		};

		match res {
@@ -1011,7 +1025,8 @@ mod tests {

		let res = {
			let mut ex = Executive::new(&mut state, &info, &engine);
			ex.transact(&t, false)
			let opts = TransactOptions { check_nonce: true, tracing: false };
			ex.transact(&t, opts)
		};

		match res {
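
The hunks above replace the bare `tracing: bool` argument of `Executive::transact` with a `TransactOptions` struct, so call sites such as `Client::call` can disable the nonce check while block import keeps it. A simplified, stand-alone sketch of that options-struct pattern (the `Executive` below is a stand-in, not ethcore's type):

```rust
// Sketch of the options-struct pattern introduced here: callers build a
// TransactOptions value instead of passing a bare bool, which lets the "call"
// path skip the nonce check while normal block processing keeps it.

struct TransactOptions {
    tracing: bool,
    check_nonce: bool,
}

struct Executive {
    expected_nonce: u64,
}

impl Executive {
    fn transact(&self, tx_nonce: u64, options: TransactOptions) -> Result<&'static str, String> {
        if options.check_nonce && tx_nonce != self.expected_nonce {
            return Err(format!("invalid nonce: expected {}, got {}", self.expected_nonce, tx_nonce));
        }
        if options.tracing {
            println!("tracing enabled for this execution");
        }
        Ok("executed")
    }
}

fn main() {
    let ex = Executive { expected_nonce: 5 };
    // Normal path: nonce is validated.
    assert!(ex.transact(4, TransactOptions { tracing: false, check_nonce: true }).is_err());
    // Call-style path: nonce check disabled, as in the client.rs hunk above.
    assert!(ex.transact(4, TransactOptions { tracing: false, check_nonce: false }).is_ok());
}
```
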
@@ -16,7 +16,7 @@

use common::*;
use engine::Engine;
use executive::Executive;
use executive::{Executive, TransactOptions};
use account_db::*;
#[cfg(test)]
#[cfg(feature = "json-tests")]
@@ -220,7 +220,8 @@ impl State {
	pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
		// let old = self.to_pod();

		let e = try!(Executive::new(self, env_info, engine).transact(t, tracing));
		let options = TransactOptions { tracing: tracing, check_nonce: true };
		let e = try!(Executive::new(self, env_info, engine).transact(t, options));

		// TODO uncomment once to_pod() works correctly.
		// trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod()));
@@ -55,7 +55,7 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res

/// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash.
/// Still operates on a individual block
/// Returns a PreverifiedBlock structure populated with transactions
/// Returns a `PreverifiedBlock` structure populated with transactions
pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result<PreverifiedBlock, Error> {
	try!(engine.verify_block_unordered(&header, Some(&bytes)));
	for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::<Header>()) {
@@ -279,7 +279,7 @@ mod tests {

	impl BlockProvider for TestBlockChain {
		fn have_tracing(&self) -> bool { false }

		fn is_known(&self, hash: &H256) -> bool {
			self.blocks.contains_key(hash)
		}
@@ -331,6 +331,7 @@ mod tests {
	}

	#[test]
	#[cfg_attr(feature="dev", allow(similar_names))]
	fn test_verify_block() {
		// Test against morden
		let mut good = Header::new();
1  fmt.sh

@@ -9,6 +9,7 @@ $RUSTFMT ./json/src/lib.rs
$RUSTFMT ./miner/src/lib.rs
$RUSTFMT ./parity/main.rs
$RUSTFMT ./rpc/src/lib.rs
$RUSTFMT ./webapp/src/lib.rs
$RUSTFMT ./sync/src/lib.rs
$RUSTFMT ./util/src/lib.rs
2  hook.sh

@@ -7,6 +7,6 @@ echo "set -e" >> $FILE
echo "cargo build --release --features dev" >> $FILE
# Build tests
echo "cargo test --no-run --features dev \\" >> $FILE
echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" >> $FILE
echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer -p ethcore-webapp" >> $FILE
echo "" >> $FILE
chmod +x $FILE
@ -145,14 +145,20 @@ struct Dispatch {
|
||||
return_type_ty: Option<P<Ty>>,
|
||||
}
|
||||
|
||||
fn implement_dispatch_arm_invoke(
|
||||
// This is the expanded version of this:
|
||||
//
|
||||
// let invoke_serialize_stmt = quote_stmt!(cx, {
|
||||
// ::bincode::serde::serialize(& $output_type_id { payload: self. $function_name ($hand_param_a, $hand_param_b) }, ::bincode::SizeLimit::Infinite).unwrap()
|
||||
// });
|
||||
//
|
||||
// But the above does not allow comma-separated expressions for arbitrary number
|
||||
// of parameters ...$hand_param_a, $hand_param_b, ... $hand_param_n
|
||||
fn implement_dispatch_arm_invoke_stmt(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
dispatch: &Dispatch,
|
||||
) -> P<ast::Expr>
|
||||
) -> ast::Stmt
|
||||
{
|
||||
let deserialize_expr = quote_expr!(cx, ::bincode::serde::deserialize_from(r, ::bincode::SizeLimit::Infinite).expect("ipc deserialization error, aborting"));
|
||||
let input_type_id = builder.id(dispatch.input_type_name.clone().unwrap().as_str());
|
||||
let function_name = builder.id(dispatch.function_name.as_str());
|
||||
let output_type_id = builder.id(dispatch.return_type_name.clone().unwrap().as_str());
|
||||
|
||||
@ -161,63 +167,71 @@ fn implement_dispatch_arm_invoke(
|
||||
quote_expr!(cx, input. $arg_ident)
|
||||
}).collect::<Vec<P<ast::Expr>>>();
|
||||
|
||||
// This is the expanded version of this:
|
||||
//
|
||||
// let invoke_serialize_stmt = quote_stmt!(cx, {
|
||||
// ::bincode::serde::serialize(& $output_type_id { payload: self. $function_name ($hand_param_a, $hand_param_b) }, ::bincode::SizeLimit::Infinite).unwrap()
|
||||
// });
|
||||
//
|
||||
// But the above does not allow comma-separated expressions for arbitrary number
|
||||
// of parameters ...$hand_param_a, $hand_param_b, ... $hand_param_n
|
||||
let invoke_serialize_stmt = {
|
||||
let ext_cx = &*cx;
|
||||
::quasi::parse_stmt_panic(&mut ::syntax::parse::new_parser_from_tts(
|
||||
ext_cx.parse_sess(),
|
||||
ext_cx.cfg(),
|
||||
{
|
||||
let _sp = ext_cx.call_site();
|
||||
let mut tt = ::std::vec::Vec::new();
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("bincode"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("serde"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("serialize"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::BinOp(::syntax::parse::token::And)));
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&output_type_id, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("payload"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Colon));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("self"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Dot));
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&function_name, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
let ext_cx = &*cx;
|
||||
::quasi::parse_stmt_panic(&mut ::syntax::parse::new_parser_from_tts(
|
||||
ext_cx.parse_sess(),
|
||||
ext_cx.cfg(),
|
||||
{
|
||||
let _sp = ext_cx.call_site();
|
||||
let mut tt = ::std::vec::Vec::new();
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("bincode"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("serde"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("serialize"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::BinOp(::syntax::parse::token::And)));
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&output_type_id, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("payload"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Colon));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("self"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Dot));
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&function_name, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
|
||||
for arg_expr in input_args_exprs {
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&arg_expr, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Comma));
|
||||
}
|
||||
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace)));
|
||||
for arg_expr in input_args_exprs {
|
||||
tt.extend(::quasi::ToTokens::to_tokens(&arg_expr, ext_cx).into_iter());
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Comma));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("bincode"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("SizeLimit"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("Infinite"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Dot));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("unwrap"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace)));
|
||||
tt
|
||||
}))
|
||||
}
|
||||
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Comma));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("bincode"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("SizeLimit"), ::syntax::parse::token::ModName)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::ModSep));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("Infinite"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Dot));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::Ident(ext_cx.ident_of("unwrap"), ::syntax::parse::token::Plain)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::OpenDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Paren)));
|
||||
tt.push(::syntax::ast::TokenTree::Token(_sp, ::syntax::parse::token::CloseDelim(::syntax::parse::token::Brace)));
|
||||
tt
|
||||
})).unwrap()
|
||||
}
|
||||
|
||||
fn implement_dispatch_arm_invoke(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
dispatch: &Dispatch,
|
||||
buffer: bool,
|
||||
) -> P<ast::Expr>
|
||||
{
|
||||
let deserialize_expr = if buffer {
|
||||
quote_expr!(cx, ::bincode::serde::deserialize(buf).expect("ipc deserialization error, aborting"))
|
||||
} else {
|
||||
quote_expr!(cx, ::bincode::serde::deserialize_from(r, ::bincode::SizeLimit::Infinite).expect("ipc deserialization error, aborting"))
|
||||
};
|
||||
|
||||
let input_type_id = builder.id(dispatch.input_type_name.clone().unwrap().as_str());
|
||||
|
||||
let invoke_serialize_stmt = implement_dispatch_arm_invoke_stmt(cx, builder, dispatch);
|
||||
quote_expr!(cx, {
|
||||
let input: $input_type_id = $deserialize_expr;
|
||||
$invoke_serialize_stmt
|
||||
@ -225,14 +239,31 @@ fn implement_dispatch_arm_invoke(
|
||||
}
|
||||
|
||||
/// generates dispatch match for method id
|
||||
fn implement_dispatch_arm(cx: &ExtCtxt, builder: &aster::AstBuilder, index: u32, dispatch: &Dispatch)
|
||||
-> ast::Arm
|
||||
fn implement_dispatch_arm(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
index: u32,
|
||||
dispatch: &Dispatch,
|
||||
buffer: bool,
|
||||
) -> ast::Arm
|
||||
{
|
||||
let index_ident = builder.id(format!("{}", index).as_str());
|
||||
let invoke_expr = implement_dispatch_arm_invoke(cx, builder, dispatch);
|
||||
let invoke_expr = implement_dispatch_arm_invoke(cx, builder, dispatch, buffer);
|
||||
quote_arm!(cx, $index_ident => { $invoke_expr } )
|
||||
}
|
||||
|
||||
fn implement_dispatch_arms(
|
||||
cx: &ExtCtxt,
|
||||
builder: &aster::AstBuilder,
|
||||
dispatches: &[Dispatch],
|
||||
buffer: bool,
|
||||
) -> Vec<ast::Arm>
|
||||
{
|
||||
let mut index = -1;
|
||||
dispatches.iter()
|
||||
.map(|dispatch| { index = index + 1; implement_dispatch_arm(cx, builder, index as u32, dispatch, buffer) }).collect()
|
||||
}
|
||||
|
||||
/// generates client type for specified server type
|
||||
/// for say `Service` it generates `ServiceClient`
|
||||
fn push_client_struct(cx: &ExtCtxt, builder: &aster::AstBuilder, item: &Item, push: &mut FnMut(Annotatable)) {
|
||||
@ -511,9 +542,9 @@ fn implement_interface(
|
||||
dispatch_table.push(push_invoke_signature_aster(builder, &impl_item, signature, push));
|
||||
}
|
||||
}
|
||||
let mut index = -1;
|
||||
let dispatch_arms: Vec<_> = dispatch_table.iter()
|
||||
.map(|dispatch| { index = index + 1; implement_dispatch_arm(cx, builder, index as u32, dispatch) }).collect();
|
||||
|
||||
let dispatch_arms = implement_dispatch_arms(cx, builder, &dispatch_table, false);
|
||||
let dispatch_arms_buffered = implement_dispatch_arms(cx, builder, &dispatch_table, true);
|
||||
|
||||
Ok((quote_item!(cx,
|
||||
impl $impl_generics ::ipc::IpcInterface<$ty> for $ty $where_clause {
|
||||
@ -531,6 +562,14 @@ fn implement_interface(
|
||||
_ => vec![]
|
||||
}
|
||||
}
|
||||
|
||||
fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8>
|
||||
{
|
||||
match method_num {
|
||||
$dispatch_arms_buffered
|
||||
_ => vec![]
|
||||
}
|
||||
}
|
||||
}
|
||||
).unwrap(), dispatch_table))
|
||||
}
|
||||
|
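
The codegen changes above add a buffered dispatch path: besides the stream-based `dispatch`, the derived implementation now emits `dispatch_buf(method_num, buf)`, a match over method indexes that returns a serialized reply and answers unknown methods with an empty vector. A hand-written illustration of that shape (serialization faked with plain byte vectors rather than bincode):

```rust
// Rough, hand-written illustration of the dispatcher shape the codegen emits:
// one match arm per method index, returning a serialized reply, with unknown
// method numbers answered by an empty vector.

struct Service;

impl Service {
    fn add(&self, a: u8, b: u8) -> u8 { a.wrapping_add(b) }
    fn negate(&self, a: u8) -> u8 { a.wrapping_neg() }

    // Buffered dispatch: the payload is already in memory (non-blocking I/O path).
    fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8> {
        match method_num {
            0 => vec![self.add(buf[0], buf[1])],
            1 => vec![self.negate(buf[0])],
            _ => vec![],
        }
    }
}

fn main() {
    let service = Service;
    assert_eq!(service.dispatch_buf(0, &[2, 3]), vec![5]);
    assert_eq!(service.dispatch_buf(42, &[]), Vec::<u8>::new());
}
```
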
12  ipc/nano/Cargo.toml  Normal file

@@ -0,0 +1,12 @@
[package]
name = "ethcore-ipc-nano"
version = "1.1.0"
authors = ["Nikolay Volf <nikolay@ethcore.io>"]
license = "GPL-3.0"

[features]

[dependencies]
"ethcore-ipc" = { path = "../rpc" }
nanomsg = "0.5.0"
log = "0.3"
214  ipc/nano/src/lib.rs  Normal file
@ -0,0 +1,214 @@
|
||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! IPC over nanomsg transport
|
||||
|
||||
extern crate ethcore_ipc as ipc;
|
||||
extern crate nanomsg;
|
||||
#[macro_use] extern crate log;
|
||||
|
||||
pub use ipc::*;
|
||||
|
||||
use std::sync::*;
|
||||
use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut};
|
||||
|
||||
const POLL_TIMEOUT: isize = 100;
|
||||
|
||||
pub struct Worker<S> where S: IpcInterface<S> {
|
||||
service: Arc<S>,
|
||||
sockets: Vec<(Socket, Endpoint)>,
|
||||
polls: Vec<PollFd>,
|
||||
buf: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SocketError {
|
||||
DuplexLink
|
||||
}
|
||||
|
||||
impl<S> Worker<S> where S: IpcInterface<S> {
|
||||
pub fn new(service: Arc<S>) -> Worker<S> {
|
||||
Worker::<S> {
|
||||
service: service.clone(),
|
||||
sockets: Vec::new(),
|
||||
polls: Vec::new(),
|
||||
buf: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn poll(&mut self) {
|
||||
let mut request = PollRequest::new(&mut self.polls[..]);
|
||||
let _result_guard = Socket::poll(&mut request, POLL_TIMEOUT);
|
||||
|
||||
for (fd_index, fd) in request.get_fds().iter().enumerate() {
|
||||
if fd.can_read() {
|
||||
let (ref mut socket, _) = self.sockets[fd_index];
|
||||
unsafe { self.buf.set_len(0); }
|
||||
match socket.nb_read_to_end(&mut self.buf) {
|
||||
Ok(method_sign_len) => {
|
||||
if method_sign_len >= 2 {
|
||||
// method_num
|
||||
let method_num = self.buf[1] as u16 * 256 + self.buf[0] as u16;
|
||||
// payload
|
||||
let payload = &self.buf[2..];
|
||||
|
||||
// dispatching for ipc interface
|
||||
let result = self.service.dispatch_buf(method_num, payload);
|
||||
|
||||
if let Err(e) = socket.nb_write(&result) {
|
||||
warn!(target: "ipc", "Failed to write response: {:?}", e);
|
||||
}
|
||||
}
|
||||
else {
|
||||
warn!(target: "ipc", "Failed to read method signature from socket: unexpected message length({})", method_sign_len);
|
||||
}
|
||||
},
|
||||
Err(Error::TryAgain) => {
|
||||
},
|
||||
Err(x) => {
|
||||
warn!(target: "ipc", "Error polling connections {:?}", x);
|
||||
panic!();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn rebuild_poll_request(&mut self) {
|
||||
self.polls = self.sockets.iter()
|
||||
.map(|&(ref socket, _)| socket.new_pollfd(PollInOut::In))
|
||||
.collect::<Vec<PollFd>>();
|
||||
}
|
||||
|
||||
pub fn add_duplex(&mut self, addr: &str) -> Result<(), SocketError> {
|
||||
let mut socket = try!(Socket::new(Protocol::Pair).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
|
||||
SocketError::DuplexLink
|
||||
}));
|
||||
|
||||
let endpoint = try!(socket.bind(addr).map_err(|e| {
|
||||
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", addr, e);
|
||||
SocketError::DuplexLink
|
||||
}));
|
||||
|
||||
self.sockets.push((socket, endpoint));
|
||||
|
||||
self.rebuild_poll_request();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::Worker;
|
||||
use ipc::*;
|
||||
use std::io::{Read, Write};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use nanomsg::{Socket, Protocol, Endpoint};
|
||||
|
||||
struct TestInvoke {
|
||||
method_num: u16,
|
||||
params: Vec<u8>,
|
||||
}
|
||||
|
||||
struct DummyService {
|
||||
methods_stack: RwLock<Vec<TestInvoke>>,
|
||||
}
|
||||
|
||||
impl DummyService {
|
||||
fn new() -> DummyService {
|
||||
DummyService { methods_stack: RwLock::new(Vec::new()) }
|
||||
}
|
||||
}
|
||||
|
||||
impl IpcInterface<DummyService> for DummyService {
|
||||
fn dispatch<R>(&self, _r: &mut R) -> Vec<u8> where R: Read {
|
||||
vec![]
|
||||
}
|
||||
fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8> {
|
||||
self.methods_stack.write().unwrap().push(
|
||||
TestInvoke {
|
||||
method_num: method_num,
|
||||
params: buf.to_vec(),
|
||||
});
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
fn dummy_write(addr: &str, buf: &[u8]) -> (Socket, Endpoint) {
|
||||
let mut socket = Socket::new(Protocol::Pair).unwrap();
|
||||
let endpoint = socket.connect(addr).unwrap();
|
||||
//thread::sleep_ms(10);
|
||||
socket.write(buf).unwrap();
|
||||
(socket, endpoint)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_create_worker() {
|
||||
let worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
assert_eq!(0, worker.sockets.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_add_duplex_socket_to_worker() {
|
||||
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
worker.add_duplex("ipc:///tmp/parity-test10.ipc").unwrap();
|
||||
assert_eq!(1, worker.sockets.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn worker_can_poll_empty() {
|
||||
let service = Arc::new(DummyService::new());
|
||||
let mut worker = Worker::<DummyService>::new(service.clone());
|
||||
worker.add_duplex("ipc:///tmp/parity-test20.ipc").unwrap();
|
||||
worker.poll();
|
||||
assert_eq!(0, service.methods_stack.read().unwrap().len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn worker_can_poll() {
|
||||
let url = "ipc:///tmp/parity-test30.ipc";
|
||||
|
||||
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
worker.add_duplex(url).unwrap();
|
||||
|
||||
let (_socket, _endpoint) = dummy_write(url, &vec![0, 0, 7, 7, 6, 6]);
|
||||
worker.poll();
|
||||
|
||||
assert_eq!(1, worker.service.methods_stack.read().unwrap().len());
|
||||
assert_eq!(0, worker.service.methods_stack.read().unwrap()[0].method_num);
|
||||
assert_eq!([7, 7, 6, 6], worker.service.methods_stack.read().unwrap()[0].params[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn worker_can_poll_long() {
|
||||
let url = "ipc:///tmp/parity-test40.ipc";
|
||||
|
||||
let mut worker = Worker::<DummyService>::new(Arc::new(DummyService::new()));
|
||||
worker.add_duplex(url).unwrap();
|
||||
|
||||
let message = [0u8; 1024*1024];
|
||||
|
||||
let (_socket, _endpoint) = dummy_write(url, &message);
|
||||
worker.poll();
|
||||
|
||||
assert_eq!(1, worker.service.methods_stack.read().unwrap().len());
|
||||
assert_eq!(0, worker.service.methods_stack.read().unwrap()[0].method_num);
|
||||
assert_eq!(vec![0u8; 1024*1024-2], worker.service.methods_stack.read().unwrap()[0].params);
|
||||
}
|
||||
}
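
`Worker::poll` above treats the first two bytes of each message as the method number (decoded as `buf[1] as u16 * 256 + buf[0] as u16`, i.e. little-endian) and hands the remaining bytes to `dispatch_buf` as the payload. A stand-alone sketch of that framing:

```rust
// Stand-alone sketch of the wire framing Worker::poll expects: the first two
// bytes are the method number (little-endian u16) and everything after them is
// the payload.

fn encode_frame(method_num: u16, payload: &[u8]) -> Vec<u8> {
    let mut frame = Vec::with_capacity(2 + payload.len());
    frame.push((method_num & 0xff) as u8); // low byte first
    frame.push((method_num >> 8) as u8);   // high byte second
    frame.extend_from_slice(payload);
    frame
}

fn decode_frame(buf: &[u8]) -> Option<(u16, &[u8])> {
    if buf.len() < 2 {
        // Mirrors the "unexpected message length" warning in the worker.
        return None;
    }
    let method_num = buf[1] as u16 * 256 + buf[0] as u16;
    Some((method_num, &buf[2..]))
}

fn main() {
    let frame = encode_frame(7, &[6, 6]);
    assert_eq!(frame, vec![7, 0, 6, 6]);
    assert_eq!(decode_frame(&frame), Some((7, &[6u8, 6][..])));
}
```
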
@@ -21,8 +21,12 @@ use std::marker::Sync;
use std::sync::atomic::*;

pub trait IpcInterface<T> {
	/// reads the message from io, dispatches the call and returns result
	/// reads the message from io, dispatches the call and returns serialized result
	fn dispatch<R>(&self, r: &mut R) -> Vec<u8> where R: Read;

	/// deserialize the payload from buffer, dispatches invoke and returns serialized result
	/// (for non-blocking io)
	fn dispatch_buf(&self, method_num: u16, buf: &[u8]) -> Vec<u8>;
}

/// serializes method invocation (method_num and parameters) to the stream specified by `w`
@@ -10,7 +10,7 @@ rustc-serialize = "0.3"
serde = "0.7.0"
serde_json = "0.7.0"
serde_macros = { version = "0.7.0", optional = true }
clippy = { version = "0.0.54", optional = true }
clippy = { version = "0.0.61", optional = true}

[build-dependencies]
serde_codegen = { version = "0.7.0", optional = true }
@@ -17,7 +17,7 @@ log = "0.3"
env_logger = "0.3"
rustc-serialize = "0.3"
rayon = "0.3.1"
clippy = { version = "0.0.54", optional = true }
clippy = { version = "0.0.61", optional = true}

[features]
default = []
@@ -105,9 +105,15 @@ pub trait MinerService : Send + Sync {
	/// Get the sealing work package and if `Some`, apply some transform.
	fn map_sealing_work<F, T>(&self, chain: &BlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T;

	/// Query pending transactions for hash
	/// Query pending transactions for hash.
	fn transaction(&self, hash: &H256) -> Option<SignedTransaction>;

	/// Get a list of all pending transactions.
	fn pending_transactions(&self) -> Vec<SignedTransaction>;

	/// Returns highest transaction nonce for given address.
	fn last_nonce(&self, address: &Address) -> Option<U256>;

	/// Suggested gas price
	fn sensible_gas_price(&self) -> U256 { x!(20000000000u64) }
}
@ -94,6 +94,7 @@ impl Miner {
|
||||
}
|
||||
|
||||
/// Prepares new block for sealing including top transactions from queue.
|
||||
#[cfg_attr(feature="dev", allow(match_same_arms))]
|
||||
fn prepare_sealing(&self, chain: &BlockChainClient) {
|
||||
trace!(target: "miner", "prepare_sealing: entering");
|
||||
let transactions = self.transaction_queue.lock().unwrap().top_transactions();
|
||||
@ -164,7 +165,7 @@ impl Miner {
|
||||
}
|
||||
);
|
||||
if let Some(block) = b {
|
||||
if sealing_work.peek_last_ref().map(|pb| pb.block().fields().header.hash() != block.block().fields().header.hash()).unwrap_or(true) {
|
||||
if sealing_work.peek_last_ref().map_or(true, |pb| pb.block().fields().header.hash() != block.block().fields().header.hash()) {
|
||||
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
|
||||
sealing_work.push(block);
|
||||
}
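The change above is the clippy-suggested `map(..).unwrap_or(..)` to `map_or(..)` rewrite; a tiny standalone check of the equivalence, with a plain integer standing in for the block hash:

fn main() {
    let current: Option<u64> = Some(7);
    let differs_old = current.map(|h| h != 7).unwrap_or(true);
    let differs_new = current.map_or(true, |h| h != 7);
    assert_eq!(differs_old, differs_new);
    assert!(!differs_new); // same hash, so nothing new to push

    let empty: Option<u64> = None;
    assert!(empty.map_or(true, |h| h != 7)); // no pending block: always push
}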
|
||||
@ -200,7 +201,7 @@ impl MinerService for Miner {
|
||||
|
||||
fn sensible_gas_price(&self) -> U256 {
|
||||
// 10% above our minimum.
|
||||
self.transaction_queue.lock().unwrap().minimal_gas_price().clone() * x!(110) / x!(100)
|
||||
*self.transaction_queue.lock().unwrap().minimal_gas_price() * x!(110) / x!(100)
|
||||
}
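For reference, the "10% above our minimum" rule is plain integer arithmetic; the sketch below uses u64 in place of U256 and a hypothetical helper name:

// Illustrative only: with a minimal gas price of 20_000_000_000 wei (20 gwei),
// 20_000_000_000 * 110 / 100 = 22_000_000_000 wei (22 gwei).
fn ten_percent_above(min_gas_price: u64) -> u64 {
    min_gas_price * 110 / 100
}

fn main() {
    assert_eq!(ten_percent_above(20_000_000_000), 22_000_000_000);
}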
|
||||
|
||||
fn author(&self) -> Address {
|
||||
@ -227,6 +228,15 @@ impl MinerService for Miner {
|
||||
queue.find(hash)
|
||||
}
|
||||
|
||||
fn pending_transactions(&self) -> Vec<SignedTransaction> {
|
||||
let queue = self.transaction_queue.lock().unwrap();
|
||||
queue.top_transactions()
|
||||
}
|
||||
|
||||
fn last_nonce(&self, address: &Address) -> Option<U256> {
|
||||
self.transaction_queue.lock().unwrap().last_nonce(address)
|
||||
}
|
||||
|
||||
fn update_sealing(&self, chain: &BlockChainClient) {
|
||||
if self.sealing_enabled.load(atomic::Ordering::Relaxed) {
|
||||
let current_no = chain.chain_info().best_block_number;
|
||||
|
@ -18,7 +18,7 @@
|
||||
|
||||
//! Transaction Queue
|
||||
//!
|
||||
//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions
|
||||
//! `TransactionQueue` keeps track of all transactions seen by the node (received from other peers) and own transactions
|
||||
//! and orders them by priority. Top priority transactions are those with low nonce height (difference between
|
||||
//! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used
|
||||
//! for comparison (higher gas price = higher priority).
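An illustrative ordering for the rule described above, with plain u64 fields rather than the queue's actual types: lower nonce height sorts first, and ties fall back to the higher gas price.

use std::cmp::Ordering;

#[derive(Eq, PartialEq)]
struct Priority { nonce_height: u64, gas_price: u64 }

impl Ord for Priority {
    // ordered so that the highest-priority transaction sorts first
    fn cmp(&self, other: &Self) -> Ordering {
        self.nonce_height.cmp(&other.nonce_height)
            .then(other.gas_price.cmp(&self.gas_price))
    }
}

impl PartialOrd for Priority {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}

fn main() {
    let ready = Priority { nonce_height: 0, gas_price: 1 };
    let pricey_but_gapped = Priority { nonce_height: 2, gas_price: 100 };
    // no nonce gap beats a higher gas price
    assert!(ready < pricey_but_gapped);
}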
|
||||
@ -179,7 +179,7 @@ impl VerifiedTransaction {
|
||||
|
||||
/// Holds transactions accessible by (address, nonce) and by priority
|
||||
///
|
||||
/// TransactionSet keeps number of entries below limit, but it doesn't
|
||||
/// `TransactionSet` keeps number of entries below limit, but it doesn't
|
||||
/// automatically happen during `insert/remove` operations.
|
||||
/// You have to call `enforce_limit` to remove lowest priority transactions from set.
|
||||
struct TransactionSet {
|
||||
@ -262,7 +262,7 @@ pub struct AccountDetails {
|
||||
/// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
|
||||
const GAS_LIMIT_HYSTERESIS: usize = 10; // %
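A sketch of how the hysteresis constant is presumably applied, treating it as a divisor so that 10 yields a 10% margin over the block gas limit; u64 stands in for U256 and the helper name is made up:

const GAS_LIMIT_HYSTERESIS: u64 = 10; // %

fn within_gas_limit(tx_gas: u64, block_gas_limit: u64) -> bool {
    let max_gas = block_gas_limit + block_gas_limit / GAS_LIMIT_HYSTERESIS;
    tx_gas <= max_gas
}

fn main() {
    // with a 4_700_000 block gas limit, anything up to 5_170_000 is accepted
    assert!(within_gas_limit(5_170_000, 4_700_000));
    assert!(!within_gas_limit(5_170_001, 4_700_000));
}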
|
||||
|
||||
/// TransactionQueue implementation
|
||||
/// `TransactionQueue` implementation
|
||||
pub struct TransactionQueue {
|
||||
/// Gas Price threshold for transactions that can be imported to this queue (defaults to 0)
|
||||
minimal_gas_price: U256,
|
||||
@ -523,6 +523,11 @@ impl TransactionQueue {
|
||||
self.last_nonces.clear();
|
||||
}
|
||||
|
||||
/// Returns highest transaction nonce for given address.
|
||||
pub fn last_nonce(&self, address: &Address) -> Option<U256> {
|
||||
self.last_nonces.get(address).cloned()
|
||||
}
|
||||
|
||||
/// Checks if there are any transactions in `future` that should actually be promoted to `current`
|
||||
/// (because nonce matches).
|
||||
fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) {
|
||||
@ -1255,4 +1260,29 @@ mod test {
|
||||
assert_eq!(stats.future, 0);
|
||||
assert_eq!(stats.pending, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_return_none_when_transaction_from_given_address_does_not_exist() {
|
||||
// given
|
||||
let mut txq = TransactionQueue::new();
|
||||
|
||||
// then
|
||||
assert_eq!(txq.last_nonce(&Address::default()), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_return_correct_nonce_when_transactions_from_given_address_exist() {
|
||||
// given
|
||||
let mut txq = TransactionQueue::new();
|
||||
let tx = new_tx();
|
||||
let from = tx.sender().unwrap();
|
||||
let nonce = tx.nonce;
|
||||
let details = |a: &Address| AccountDetails { nonce: nonce, balance: !U256::zero() };
|
||||
|
||||
// when
|
||||
txq.add(tx, &details).unwrap();
|
||||
|
||||
// then
|
||||
assert_eq!(txq.last_nonce(&from), Some(nonce));
|
||||
}
|
||||
}
|
||||
|
171
parity/main.rs
@ -19,6 +19,7 @@
|
||||
#![warn(missing_docs)]
|
||||
#![cfg_attr(feature="dev", feature(plugin))]
|
||||
#![cfg_attr(feature="dev", plugin(clippy))]
|
||||
#![cfg_attr(feature="dev", allow(useless_format))]
|
||||
extern crate docopt;
|
||||
extern crate num_cpus;
|
||||
extern crate rustc_serialize;
|
||||
@ -41,6 +42,8 @@ extern crate rpassword;
|
||||
|
||||
#[cfg(feature = "rpc")]
|
||||
extern crate ethcore_rpc as rpc;
|
||||
#[cfg(feature = "webapp")]
|
||||
extern crate ethcore_webapp as webapp;
|
||||
|
||||
use std::io::{BufRead, BufReader};
|
||||
use std::fs::File;
|
||||
@ -62,6 +65,10 @@ use ethminer::{Miner, MinerService};
|
||||
use docopt::Docopt;
|
||||
use daemonize::Daemonize;
|
||||
use number_prefix::{binary_prefix, Standalone, Prefixed};
|
||||
#[cfg(feature = "rpc")]
|
||||
use rpc::Server as RpcServer;
|
||||
#[cfg(feature = "webapp")]
|
||||
use webapp::Listening as WebappServer;
|
||||
|
||||
mod price_info;
|
||||
|
||||
@ -82,7 +89,7 @@ Parity. Ethereum Client.
|
||||
|
||||
Usage:
|
||||
parity daemon <pid-file> [options]
|
||||
parity account (new | list)
|
||||
parity account (new | list) [options]
|
||||
parity [options]
|
||||
|
||||
Protocol Options:
|
||||
@ -93,7 +100,7 @@ Protocol Options:
|
||||
-d --db-path PATH Specify the database & configuration directory path
|
||||
[default: $HOME/.parity].
|
||||
--keys-path PATH Specify the path for JSON key files to be found
|
||||
[default: $HOME/.web3/keys].
|
||||
[default: $HOME/.parity/keys].
|
||||
--identity NAME Specify your node's name.
|
||||
|
||||
Account Options:
|
||||
@ -117,7 +124,7 @@ Networking Options:
|
||||
string or input to SHA3 operation.
|
||||
|
||||
API and Console Options:
|
||||
-j --jsonrpc Enable the JSON-RPC API sever.
|
||||
-j --jsonrpc Enable the JSON-RPC API server.
|
||||
--jsonrpc-interface IP Specify the hostname portion of the JSONRPC API
|
||||
server, IP should be an interface's IP address, or
|
||||
all (all interfaces) or local [default: local].
|
||||
@ -129,6 +136,13 @@ API and Console Options:
|
||||
interface. APIS is a comma-delimited list of API
|
||||
names. Possible names are web3, eth and net.
|
||||
[default: web3,eth,net,personal].
|
||||
-w --webapp Enable the web applications server (e.g. status page).
|
||||
--webapp-port PORT Specify the port portion of the WebApps server
|
||||
[default: 8080].
|
||||
--webapp-interface IP Specify the hostname portion of the WebApps
|
||||
server, IP should be an interface's IP address, or
|
||||
all (all interfaces) or local [default: local].
|
||||
|
||||
|
||||
Sealing/Mining Options:
|
||||
--usd-per-tx USD Amount of USD to be paid for a basic transaction
|
||||
@ -213,6 +227,9 @@ struct Args {
|
||||
flag_jsonrpc_port: u16,
|
||||
flag_jsonrpc_cors: String,
|
||||
flag_jsonrpc_apis: String,
|
||||
flag_webapp: bool,
|
||||
flag_webapp_port: u16,
|
||||
flag_webapp_interface: String,
|
||||
flag_author: String,
|
||||
flag_usd_per_tx: String,
|
||||
flag_usd_per_eth: String,
|
||||
@ -269,10 +286,10 @@ fn setup_rpc_server(
|
||||
sync: Arc<EthSync>,
|
||||
secret_store: Arc<AccountService>,
|
||||
miner: Arc<Miner>,
|
||||
url: &str,
|
||||
url: &SocketAddr,
|
||||
cors_domain: &str,
|
||||
apis: Vec<&str>
|
||||
) -> Option<Arc<PanicHandler>> {
|
||||
) -> RpcServer {
|
||||
use rpc::v1::*;
|
||||
|
||||
let server = rpc::RpcServer::new();
|
||||
@ -290,9 +307,42 @@ fn setup_rpc_server(
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(server.start_http(url, cors_domain, ::num_cpus::get()))
|
||||
let start_result = server.start_http(url, cors_domain);
|
||||
match start_result {
|
||||
Err(rpc::RpcServerError::IoError(err)) => die_with_io_error(err),
|
||||
Err(e) => die!("{:?}", e),
|
||||
Ok(server) => server,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "webapp")]
|
||||
fn setup_webapp_server(
|
||||
client: Arc<Client>,
|
||||
sync: Arc<EthSync>,
|
||||
secret_store: Arc<AccountService>,
|
||||
miner: Arc<Miner>,
|
||||
url: &str
|
||||
) -> WebappServer {
|
||||
use rpc::v1::*;
|
||||
|
||||
let server = webapp::WebappServer::new();
|
||||
server.add_delegate(Web3Client::new().to_delegate());
|
||||
server.add_delegate(NetClient::new(&sync).to_delegate());
|
||||
server.add_delegate(EthClient::new(&client, &sync, &secret_store, &miner).to_delegate());
|
||||
server.add_delegate(EthFilterClient::new(&client, &miner).to_delegate());
|
||||
server.add_delegate(PersonalClient::new(&secret_store).to_delegate());
|
||||
let start_result = server.start_http(url, ::num_cpus::get());
|
||||
match start_result {
|
||||
Err(webapp::WebappServerError::IoError(err)) => die_with_io_error(err),
|
||||
Err(e) => die!("{:?}", e),
|
||||
Ok(handle) => handle,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "rpc"))]
|
||||
struct RpcServer;
|
||||
|
||||
#[cfg(not(feature = "rpc"))]
|
||||
fn setup_rpc_server(
|
||||
_client: Arc<Client>,
|
||||
@ -302,8 +352,22 @@ fn setup_rpc_server(
|
||||
_url: &str,
|
||||
_cors_domain: &str,
|
||||
_apis: Vec<&str>
|
||||
) -> Option<Arc<PanicHandler>> {
|
||||
None
|
||||
) -> ! {
|
||||
die!("Your Parity version has been compiled without JSON-RPC support.")
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "webapp"))]
|
||||
struct WebappServer;
|
||||
|
||||
#[cfg(not(feature = "webapp"))]
|
||||
fn setup_webapp_server(
|
||||
_client: Arc<Client>,
|
||||
_sync: Arc<EthSync>,
|
||||
_secret_store: Arc<AccountService>,
|
||||
_miner: Arc<Miner>,
|
||||
_url: &str
|
||||
) -> ! {
|
||||
die!("Your Parity version has been compiled without WebApps support.")
|
||||
}
|
||||
|
||||
fn print_version() {
|
||||
@ -361,9 +425,9 @@ impl Configuration {
|
||||
die!("{}: Invalid basic transaction price given in USD. Must be a decimal number.", self.args.flag_usd_per_tx)
|
||||
});
|
||||
let usd_per_eth = match self.args.flag_usd_per_eth.as_str() {
|
||||
"etherscan" => price_info::PriceInfo::get().map(|x| x.ethusd).unwrap_or_else(|| {
|
||||
"etherscan" => price_info::PriceInfo::get().map_or_else(|| {
|
||||
die!("Unable to retrieve USD value of ETH from etherscan. Rerun with a different value for --usd-per-eth.")
|
||||
}),
|
||||
}, |x| x.ethusd),
|
||||
x => FromStr::from_str(x).unwrap_or_else(|_| die!("{}: Invalid ether price given in USD. Must be a decimal number.", x))
|
||||
};
|
||||
let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
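The etherscan branch above switches to `Option::map_or_else`, whose arguments are (default closure, map closure) in that order, which is why the die!() branch now comes first. A small standalone illustration, including the wei-per-USD arithmetic:

fn main() {
    let price: Option<f32> = Some(12.5); // pretend the price lookup returned 12.5 USD/ETH
    let usd_per_eth = price.map_or_else(|| panic!("Unable to retrieve USD value of ETH"), |x| x);
    // 1 ETH = 1e18 wei, so wei per USD is 1e18 / (USD per ETH)
    let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
    assert!((wei_per_usd - 8.0e16).abs() / 8.0e16 < 1e-6);
}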
|
||||
@ -383,7 +447,7 @@ impl Configuration {
|
||||
}
|
||||
}
|
||||
|
||||
fn _keys_path(&self) -> String {
|
||||
fn keys_path(&self) -> String {
|
||||
self.args.flag_keys_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
|
||||
}
|
||||
|
||||
@ -421,7 +485,6 @@ impl Configuration {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="dev", allow(useless_format))]
|
||||
fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
|
||||
let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port));
|
||||
let public_address = if self.args.flag_nat.starts_with("extip:") {
|
||||
@ -450,7 +513,6 @@ impl Configuration {
|
||||
ret
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="dev", allow(useless_format))]
|
||||
fn client_config(&self) -> ClientConfig {
|
||||
let mut client_config = ClientConfig::default();
|
||||
match self.args.flag_cache {
|
||||
@ -505,7 +567,7 @@ impl Configuration {
|
||||
fn execute_account_cli(&self) {
|
||||
use util::keys::store::SecretStore;
|
||||
use rpassword::read_password;
|
||||
let mut secret_store = SecretStore::new();
|
||||
let mut secret_store = SecretStore::new_in(Path::new(&self.keys_path()));
|
||||
if self.args.cmd_new {
|
||||
println!("Please note that password is NOT RECOVERABLE.");
|
||||
println!("Type password: ");
|
||||
@ -539,7 +601,7 @@ impl Configuration {
|
||||
.into_iter()
|
||||
}).collect::<Vec<_>>();
|
||||
|
||||
let account_service = AccountService::new();
|
||||
let account_service = AccountService::new_in(Path::new(&self.keys_path()));
|
||||
for d in &self.args.flag_unlock {
|
||||
let a = Address::from_str(clean_0x(&d)).unwrap_or_else(|_| {
|
||||
die!("{}: Invalid address for --unlock. Must be 40 hex characters, without the 0x at the beginning.", d)
|
||||
@ -551,7 +613,6 @@ impl Configuration {
|
||||
account_service
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="dev", allow(useless_format))]
|
||||
fn execute_client(&self) {
|
||||
// Setup panic handler
|
||||
let panic_handler = PanicHandler::new_in_arc();
|
||||
@ -569,7 +630,10 @@ impl Configuration {
|
||||
let account_service = Arc::new(self.account_service());
|
||||
|
||||
// Build client
|
||||
let mut service = ClientService::start(self.client_config(), spec, net_settings, &Path::new(&self.path())).unwrap();
|
||||
let mut service = ClientService::start(
|
||||
self.client_config(), spec, net_settings, &Path::new(&self.path())
|
||||
).unwrap_or_else(|e| die_with_error(e));
|
||||
|
||||
panic_handler.forward_from(&service);
|
||||
let client = service.client();
|
||||
|
||||
@ -584,7 +648,8 @@ impl Configuration {
|
||||
let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone());
|
||||
|
||||
// Setup rpc
|
||||
if self.args.flag_jsonrpc || self.args.flag_rpc {
|
||||
let rpc_server = if self.args.flag_jsonrpc || self.args.flag_rpc {
|
||||
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
|
||||
let url = format!("{}:{}",
|
||||
match self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_interface).as_str() {
|
||||
"all" => "0.0.0.0",
|
||||
@ -593,23 +658,41 @@ impl Configuration {
|
||||
},
|
||||
self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port)
|
||||
);
|
||||
SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url));
|
||||
let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
|
||||
// TODO: use this as the API list.
|
||||
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
|
||||
let server_handler = setup_rpc_server(
|
||||
let addr = SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url));
|
||||
let cors_domain = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
|
||||
|
||||
Some(setup_rpc_server(
|
||||
service.client(),
|
||||
sync.clone(),
|
||||
account_service.clone(),
|
||||
miner.clone(),
|
||||
&addr,
|
||||
&cors_domain,
|
||||
apis.split(',').collect()
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
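The listen-address handling above (interface keyword plus port, followed by a hard failure on parse errors) can be exercised on its own; the helper name and error text below are illustrative only:

use std::net::SocketAddr;
use std::str::FromStr;

fn resolve(interface: &str, port: u16) -> Result<SocketAddr, String> {
    let host = match interface {
        "all" => "0.0.0.0",
        "local" => "127.0.0.1",
        other => other,
    };
    let url = format!("{}:{}", host, port);
    SocketAddr::from_str(&url).map_err(|_| format!("{}: Invalid listen host/port given.", url))
}

fn main() {
    assert_eq!(resolve("local", 8545).unwrap().port(), 8545);
    assert!(resolve("nonsense host", 8545).is_err());
}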
|
||||
|
||||
let webapp_server = if self.args.flag_webapp {
|
||||
let url = format!("{}:{}",
|
||||
match self.args.flag_webapp_interface.as_str() {
|
||||
"all" => "0.0.0.0",
|
||||
"local" => "127.0.0.1",
|
||||
x => x,
|
||||
},
|
||||
self.args.flag_webapp_port
|
||||
);
|
||||
Some(setup_webapp_server(
|
||||
service.client(),
|
||||
sync.clone(),
|
||||
account_service.clone(),
|
||||
miner.clone(),
|
||||
&url,
|
||||
cors,
|
||||
apis.split(',').collect()
|
||||
);
|
||||
if let Some(handler) = server_handler {
|
||||
panic_handler.forward_from(handler.deref());
|
||||
}
|
||||
}
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Register IO handler
|
||||
let io_handler = Arc::new(ClientIoHandler {
|
||||
@ -621,11 +704,11 @@ impl Configuration {
|
||||
service.io().register_handler(io_handler).expect("Error registering IO handler");
|
||||
|
||||
// Handle exit
|
||||
wait_for_exit(panic_handler);
|
||||
wait_for_exit(panic_handler, rpc_server, webapp_server);
|
||||
}
|
||||
}
|
||||
|
||||
fn wait_for_exit(panic_handler: Arc<PanicHandler>) {
|
||||
fn wait_for_exit(panic_handler: Arc<PanicHandler>, _rpc_server: Option<RpcServer>, _webapp_server: Option<WebappServer>) {
|
||||
let exit = Arc::new(Condvar::new());
|
||||
|
||||
// Handle possible exits
|
||||
@ -639,6 +722,30 @@ fn wait_for_exit(panic_handler: Arc<PanicHandler>) {
|
||||
// Wait for signal
|
||||
let mutex = Mutex::new(());
|
||||
let _ = exit.wait(mutex.lock().unwrap()).unwrap();
|
||||
info!("Finishing work, please wait...");
|
||||
}
|
||||
|
||||
fn die_with_error(e: ethcore::error::Error) -> ! {
|
||||
use ethcore::error::Error;
|
||||
|
||||
match e {
|
||||
Error::Util(UtilError::StdIo(e)) => die_with_io_error(e),
|
||||
_ => die!("{:?}", e),
|
||||
}
|
||||
}
|
||||
fn die_with_io_error(e: std::io::Error) -> ! {
|
||||
match e.kind() {
|
||||
std::io::ErrorKind::PermissionDenied => {
|
||||
die!("No permissions to bind to specified port.")
|
||||
},
|
||||
std::io::ErrorKind::AddrInUse => {
|
||||
die!("Specified address is already in use. Please make sure that nothing is listening on the same port or try using a different one.")
|
||||
},
|
||||
std::io::ErrorKind::AddrNotAvailable => {
|
||||
die!("Could not use specified interface or given address is invalid.")
|
||||
},
|
||||
_ => die!("{:?}", e),
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
|
@ -19,8 +19,8 @@ impl PriceInfo {
|
||||
.and_then(|mut s| s.read_to_string(&mut body).ok())
|
||||
.and_then(|_| Json::from_str(&body).ok())
|
||||
.and_then(|json| json.find_path(&["result", "ethusd"])
|
||||
.and_then(|obj| match obj {
|
||||
&Json::String(ref s) => Some(PriceInfo {
|
||||
.and_then(|obj| match *obj {
|
||||
Json::String(ref s) => Some(PriceInfo {
|
||||
ethusd: FromStr::from_str(&s).unwrap()
|
||||
}),
|
||||
_ => None
|
||||
|
@ -13,7 +13,7 @@ log = "0.3"
|
||||
serde = "0.7.0"
|
||||
serde_json = "0.7.0"
|
||||
jsonrpc-core = "2.0"
|
||||
jsonrpc-http-server = "3.0"
|
||||
jsonrpc-http-server = { git = "https://github.com/debris/jsonrpc-http-server.git" }
|
||||
ethcore-util = { path = "../util" }
|
||||
ethcore = { path = "../ethcore" }
|
||||
ethash = { path = "../ethash" }
|
||||
@ -22,7 +22,7 @@ ethminer = { path = "../miner" }
|
||||
rustc-serialize = "0.3"
|
||||
transient-hashmap = "0.1"
|
||||
serde_macros = { version = "0.7.0", optional = true }
|
||||
clippy = { version = "0.0.54", optional = true }
|
||||
clippy = { version = "0.0.61", optional = true}
|
||||
|
||||
[build-dependencies]
|
||||
serde_codegen = { version = "0.7.0", optional = true }
|
||||
|
@ -33,10 +33,10 @@ extern crate ethminer;
|
||||
extern crate transient_hashmap;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use util::panics::PanicHandler;
|
||||
use std::net::SocketAddr;
|
||||
use self::jsonrpc_core::{IoHandler, IoDelegate};
|
||||
|
||||
pub use jsonrpc_http_server::{Server, RpcServerError};
|
||||
pub mod v1;
|
||||
|
||||
/// Http server.
|
||||
@ -45,7 +45,7 @@ pub struct RpcServer {
|
||||
}
|
||||
|
||||
impl RpcServer {
|
||||
/// Construct new http server object with given number of threads.
|
||||
/// Construct new http server object.
|
||||
pub fn new() -> RpcServer {
|
||||
RpcServer {
|
||||
handler: Arc::new(IoHandler::new()),
|
||||
@ -57,18 +57,9 @@ impl RpcServer {
|
||||
self.handler.add_delegate(delegate);
|
||||
}
|
||||
|
||||
/// Start server asynchronously in new thread and returns panic handler.
|
||||
pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc<PanicHandler> {
|
||||
let addr = addr.to_owned();
|
||||
/// Start server asynchronously and returns result with `Server` handle on success or an error.
|
||||
pub fn start_http(&self, addr: &SocketAddr, cors_domain: &str) -> Result<Server, RpcServerError> {
|
||||
let cors_domain = cors_domain.to_owned();
|
||||
let panic_handler = PanicHandler::new_in_arc();
|
||||
let ph = panic_handler.clone();
|
||||
let server = jsonrpc_http_server::Server::new(self.handler.clone());
|
||||
thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || {
|
||||
ph.catch_panic(move || {
|
||||
server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads);
|
||||
}).unwrap()
|
||||
}).expect("Error while creating jsonrpc http thread");
|
||||
panic_handler
|
||||
Server::start(addr, self.handler.clone(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain))
|
||||
}
|
||||
}
|
||||
|
@ -43,6 +43,10 @@ fn default_gas() -> U256 {
|
||||
U256::from(21_000)
|
||||
}
|
||||
|
||||
fn default_call_gas() -> U256 {
|
||||
U256::from(50_000_000)
|
||||
}
|
||||
|
||||
/// Eth rpc implementation.
|
||||
pub struct EthClient<C, S, A, M, EM = ExternalMiner>
|
||||
where C: BlockChainClient,
|
||||
@ -175,27 +179,30 @@ impl<C, S, A, M, EM> EthClient<C, S, A, M, EM>
|
||||
Ok(EthTransaction {
|
||||
nonce: request.nonce.unwrap_or_else(|| client.nonce(&from)),
|
||||
action: request.to.map_or(Action::Create, Action::Call),
|
||||
gas: request.gas.unwrap_or_else(default_gas),
|
||||
gas: request.gas.unwrap_or_else(default_call_gas),
|
||||
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
|
||||
value: request.value.unwrap_or_else(U256::zero),
|
||||
data: request.data.map_or_else(Vec::new, |d| d.to_vec())
|
||||
}.fake_sign(from))
|
||||
}
|
||||
|
||||
fn dispatch_transaction(&self, signed_transaction: SignedTransaction, raw_transaction: Vec<u8>) -> Result<Value, Error> {
|
||||
fn dispatch_transaction(&self, signed_transaction: SignedTransaction) -> Result<Value, Error> {
|
||||
let hash = signed_transaction.hash();
|
||||
|
||||
|
||||
let import = {
|
||||
let miner = take_weak!(self.miner);
|
||||
let client = take_weak!(self.client);
|
||||
take_weak!(self.miner).import_transactions(vec![signed_transaction], |a: &Address| AccountDetails {
|
||||
nonce: client.nonce(a),
|
||||
nonce: miner
|
||||
.last_nonce(a)
|
||||
.map(|nonce| nonce + U256::one())
|
||||
.unwrap_or_else(|| client.nonce(a)),
|
||||
balance: client.balance(a),
|
||||
})
|
||||
};
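The nonce selection introduced above prefers the miner's last pending nonce plus one and only falls back to the on-chain account nonce; a minimal sketch with u64 values and closures standing in for the miner and client lookups:

fn next_nonce<M, C>(miner_last_nonce: M, chain_nonce: C) -> u64
    where M: Fn() -> Option<u64>, C: Fn() -> u64
{
    miner_last_nonce()
        .map(|nonce| nonce + 1)
        .unwrap_or_else(chain_nonce)
}

fn main() {
    // pending tx with nonce 4 already queued: the next one should use 5
    assert_eq!(next_nonce(|| Some(4), || 2), 5);
    // nothing pending: fall back to the on-chain account nonce
    assert_eq!(next_nonce(|| None, || 2), 2);
}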
|
||||
|
||||
match import.into_iter().collect::<Result<Vec<_>, _>>() {
|
||||
Ok(_) => {
|
||||
take_weak!(self.sync).new_transaction(raw_transaction);
|
||||
to_value(&hash)
|
||||
}
|
||||
Err(e) => {
|
||||
@ -484,7 +491,11 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
|
||||
let client = take_weak!(self.client);
|
||||
let miner = take_weak!(self.miner);
|
||||
EthTransaction {
|
||||
nonce: request.nonce.unwrap_or_else(|| client.nonce(&request.from)),
|
||||
nonce: request.nonce
|
||||
.or_else(|| miner
|
||||
.last_nonce(&request.from)
|
||||
.map(|nonce| nonce + U256::one()))
|
||||
.unwrap_or_else(|| client.nonce(&request.from)),
|
||||
action: request.to.map_or(Action::Create, Action::Call),
|
||||
gas: request.gas.unwrap_or_else(default_gas),
|
||||
gas_price: request.gas_price.unwrap_or_else(|| miner.sensible_gas_price()),
|
||||
@ -492,8 +503,7 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
|
||||
data: request.data.map_or_else(Vec::new, |d| d.to_vec()),
|
||||
}.sign(&secret)
|
||||
};
|
||||
let raw_transaction = encode(&signed_transaction).to_vec();
|
||||
self.dispatch_transaction(signed_transaction, raw_transaction)
|
||||
self.dispatch_transaction(signed_transaction)
|
||||
},
|
||||
Err(_) => { to_value(&H256::zero()) }
|
||||
}
|
||||
@ -505,7 +515,7 @@ impl<C, S, A, M, EM> Eth for EthClient<C, S, A, M, EM>
|
||||
.and_then(|(raw_transaction, )| {
|
||||
let raw_transaction = raw_transaction.to_vec();
|
||||
match UntrustedRlp::new(&raw_transaction).as_val() {
|
||||
Ok(signed_transaction) => self.dispatch_transaction(signed_transaction, raw_transaction),
|
||||
Ok(signed_transaction) => self.dispatch_transaction(signed_transaction),
|
||||
Err(_) => to_value(&H256::zero()),
|
||||
}
|
||||
})
|
||||
|
@ -49,7 +49,7 @@ impl<A> Personal for PersonalClient<A> where A: AccountProvider + 'static {
|
||||
|(pass, )| {
|
||||
let store = take_weak!(self.accounts);
|
||||
match store.new_account(&pass) {
|
||||
Ok(address) => Ok(Value::String(format!("0x{:?}", address))),
|
||||
Ok(address) => to_value(&address),
|
||||
Err(_) => Err(Error::internal_error())
|
||||
}
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ use util::numbers::{Uint, U256};
|
||||
use ethcore::client::{TestBlockChainClient, EachBlockWith, Executed, TransactionId};
|
||||
use ethcore::log_entry::{LocalizedLogEntry, LogEntry};
|
||||
use ethcore::receipt::LocalizedReceipt;
|
||||
use ethcore::transaction::{Transaction, Action};
|
||||
use v1::{Eth, EthClient};
|
||||
use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService, TestExternalMiner};
|
||||
|
||||
@ -52,7 +53,7 @@ fn miner_service() -> Arc<TestMinerService> {
|
||||
struct EthTester {
|
||||
pub client: Arc<TestBlockChainClient>,
|
||||
pub sync: Arc<TestSyncProvider>,
|
||||
_accounts_provider: Arc<TestAccountProvider>,
|
||||
pub accounts_provider: Arc<TestAccountProvider>,
|
||||
miner: Arc<TestMinerService>,
|
||||
hashrates: Arc<RwLock<HashMap<H256, U256>>>,
|
||||
pub io: IoHandler,
|
||||
@ -72,7 +73,7 @@ impl Default for EthTester {
|
||||
EthTester {
|
||||
client: client,
|
||||
sync: sync,
|
||||
_accounts_provider: ap,
|
||||
accounts_provider: ap,
|
||||
miner: miner,
|
||||
io: io,
|
||||
hashrates: hashrates,
|
||||
@ -453,9 +454,53 @@ fn rpc_eth_estimate_gas_default_block() {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn rpc_eth_send_transaction() {
|
||||
unimplemented!()
|
||||
let account = TestAccount::new("123");
|
||||
let address = account.address();
|
||||
let secret = account.secret.clone();
|
||||
|
||||
let tester = EthTester::default();
|
||||
tester.accounts_provider.accounts.write().unwrap().insert(address.clone(), account);
|
||||
let request = r#"{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "eth_sendTransaction",
|
||||
"params": [{
|
||||
"from": ""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"",
|
||||
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
|
||||
"gas": "0x76c0",
|
||||
"gasPrice": "0x9184e72a000",
|
||||
"value": "0x9184e72a"
|
||||
}],
|
||||
"id": 1
|
||||
}"#;
|
||||
|
||||
let t = Transaction {
|
||||
nonce: U256::zero(),
|
||||
gas_price: U256::from(0x9184e72a000u64),
|
||||
gas: U256::from(0x76c0),
|
||||
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
|
||||
value: U256::from(0x9184e72au64),
|
||||
data: vec![]
|
||||
}.sign(&secret);
|
||||
|
||||
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":1}"#;
|
||||
|
||||
assert_eq!(tester.io.handle_request(request.as_ref()), Some(response));
|
||||
|
||||
tester.miner.last_nonces.write().unwrap().insert(address.clone(), U256::zero());
|
||||
|
||||
let t = Transaction {
|
||||
nonce: U256::one(),
|
||||
gas_price: U256::from(0x9184e72a000u64),
|
||||
gas: U256::from(0x76c0),
|
||||
action: Action::Call(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
|
||||
value: U256::from(0x9184e72au64),
|
||||
data: vec![]
|
||||
}.sign(&secret);
|
||||
|
||||
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", t.hash()).as_ref() + r#"","id":1}"#;
|
||||
|
||||
assert_eq!(tester.io.handle_request(request.as_ref()), Some(response));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -20,7 +20,7 @@ use std::sync::RwLock;
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use util::hash::{Address, H256, FixedHash};
|
||||
use util::crypto::{Secret, Signature};
|
||||
use util::crypto::{Secret, Signature, KeyPair};
|
||||
use util::keys::store::{AccountProvider, SigningError, EncryptedHashMapError};
|
||||
|
||||
/// Account mock.
|
||||
@ -30,23 +30,31 @@ pub struct TestAccount {
|
||||
pub unlocked: bool,
|
||||
/// Account's password.
|
||||
pub password: String,
|
||||
/// Account's secret.
|
||||
pub secret: Secret,
|
||||
}
|
||||
|
||||
impl TestAccount {
|
||||
/// Creates new test account.
|
||||
pub fn new(password: &str) -> Self {
|
||||
let pair = KeyPair::create().unwrap();
|
||||
TestAccount {
|
||||
unlocked: false,
|
||||
password: password.to_owned(),
|
||||
secret: pair.secret().clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns account address.
|
||||
pub fn address(&self) -> Address {
|
||||
KeyPair::from_secret(self.secret.clone()).unwrap().address()
|
||||
}
|
||||
}
|
||||
|
||||
/// Test account provider.
|
||||
pub struct TestAccountProvider {
|
||||
accounts: RwLock<HashMap<Address, TestAccount>>,
|
||||
/// Added accounts passwords.
|
||||
pub adds: RwLock<Vec<String>>,
|
||||
/// Test provider accounts.
|
||||
pub accounts: RwLock<HashMap<Address, TestAccount>>,
|
||||
}
|
||||
|
||||
impl TestAccountProvider {
|
||||
@ -54,7 +62,6 @@ impl TestAccountProvider {
|
||||
pub fn new(accounts: HashMap<Address, TestAccount>) -> Self {
|
||||
TestAccountProvider {
|
||||
accounts: RwLock::new(accounts),
|
||||
adds: RwLock::new(vec![]),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -76,14 +83,20 @@ impl AccountProvider for TestAccountProvider {
|
||||
}
|
||||
|
||||
fn new_account(&self, pass: &str) -> Result<Address, io::Error> {
|
||||
let mut adds = self.adds.write().unwrap();
|
||||
let address = Address::from(adds.len() as u64 + 2);
|
||||
adds.push(pass.to_owned());
|
||||
let account = TestAccount::new(pass);
|
||||
let address = KeyPair::from_secret(account.secret.clone()).unwrap().address();
|
||||
self.accounts.write().unwrap().insert(address.clone(), account);
|
||||
Ok(address)
|
||||
}
|
||||
|
||||
fn account_secret(&self, _account: &Address) -> Result<Secret, SigningError> {
|
||||
Ok(Secret::random())
|
||||
fn account_secret(&self, address: &Address) -> Result<Secret, SigningError> {
|
||||
// TODO: consider checking if the account is unlocked; some tests may need alteration then.
|
||||
self.accounts
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(address)
|
||||
.ok_or(SigningError::NoAccount)
|
||||
.map(|acc| acc.secret.clone())
|
||||
}
|
||||
|
||||
fn sign(&self, _account: &Address, _message: &H256) -> Result<Signature, SigningError> {
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
//! Test implementation of miner service.
|
||||
|
||||
use util::{Address, H256, Bytes};
|
||||
use util::{Address, H256, Bytes, U256};
|
||||
use util::standard::*;
|
||||
use ethcore::error::Error;
|
||||
use ethcore::client::BlockChainClient;
|
||||
@ -27,19 +27,22 @@ use ethminer::{MinerService, MinerStatus, AccountDetails};
|
||||
/// Test miner service.
|
||||
pub struct TestMinerService {
|
||||
/// Imported transactions.
|
||||
pub imported_transactions: RwLock<Vec<H256>>,
|
||||
pub imported_transactions: Mutex<Vec<SignedTransaction>>,
|
||||
/// Latest closed block.
|
||||
pub latest_closed_block: Mutex<Option<ClosedBlock>>,
|
||||
/// Pre-existing pending transactions.
|
||||
pub pending_transactions: Mutex<HashMap<H256, SignedTransaction>>,
|
||||
/// Last nonces.
|
||||
pub last_nonces: RwLock<HashMap<Address, U256>>,
|
||||
}
|
||||
|
||||
impl Default for TestMinerService {
|
||||
fn default() -> TestMinerService {
|
||||
TestMinerService {
|
||||
imported_transactions: RwLock::new(Vec::new()),
|
||||
imported_transactions: Mutex::new(Vec::new()),
|
||||
latest_closed_block: Mutex::new(None),
|
||||
pending_transactions: Mutex::new(HashMap::new()),
|
||||
last_nonces: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -56,28 +59,56 @@ impl MinerService for TestMinerService {
|
||||
}
|
||||
|
||||
/// Imports transactions to transaction queue.
|
||||
fn import_transactions<T>(&self, _transactions: Vec<SignedTransaction>, _fetch_account: T) -> Vec<Result<(), Error>>
|
||||
where T: Fn(&Address) -> AccountDetails { unimplemented!(); }
|
||||
fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, _fetch_account: T) -> Vec<Result<(), Error>>
|
||||
where T: Fn(&Address) -> AccountDetails {
|
||||
// let's assume that all txs are valid
|
||||
self.imported_transactions.lock().unwrap().extend_from_slice(&transactions);
|
||||
|
||||
transactions
|
||||
.iter()
|
||||
.map(|_| Ok(()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns hashes of transactions currently in pending
|
||||
fn pending_transactions_hashes(&self) -> Vec<H256> { vec![] }
|
||||
fn pending_transactions_hashes(&self) -> Vec<H256> {
|
||||
vec![]
|
||||
}
|
||||
|
||||
/// Removes all transactions from the queue and restart mining operation.
|
||||
fn clear_and_reset(&self, _chain: &BlockChainClient) { unimplemented!(); }
|
||||
fn clear_and_reset(&self, _chain: &BlockChainClient) {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
/// Called when blocks are imported to chain, updates transactions queue.
|
||||
fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) { unimplemented!(); }
|
||||
fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
/// New chain head event. Restart mining operation.
|
||||
fn update_sealing(&self, _chain: &BlockChainClient) { unimplemented!(); }
|
||||
fn update_sealing(&self, _chain: &BlockChainClient) {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
fn map_sealing_work<F, T>(&self, _chain: &BlockChainClient, _f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T { unimplemented!(); }
|
||||
fn map_sealing_work<F, T>(&self, _chain: &BlockChainClient, _f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
fn transaction(&self, hash: &H256) -> Option<SignedTransaction> {
|
||||
self.pending_transactions.lock().unwrap().get(hash).and_then(|tx_ref| Some(tx_ref.clone()))
|
||||
self.pending_transactions.lock().unwrap().get(hash).cloned()
|
||||
}
|
||||
|
||||
fn pending_transactions(&self) -> Vec<SignedTransaction> {
|
||||
self.pending_transactions.lock().unwrap().values().cloned().collect()
|
||||
}
|
||||
|
||||
fn last_nonce(&self, address: &Address) -> Option<U256> {
|
||||
self.last_nonces.read().unwrap().get(address).cloned()
|
||||
}
|
||||
|
||||
/// Submit `seal` as a valid solution for the header of `pow_hash`.
|
||||
/// Will check the seal, but not actually insert the block into the chain.
|
||||
fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> { unimplemented!(); }
|
||||
fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
|
||||
unimplemented!();
|
||||
}
|
||||
}
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
//! Test implementation of SyncProvider.
|
||||
|
||||
use util::{U256, Bytes};
|
||||
use util::{U256};
|
||||
use ethsync::{SyncProvider, SyncStatus, SyncState};
|
||||
use std::sync::{RwLock};
|
||||
|
||||
@ -59,8 +59,5 @@ impl SyncProvider for TestSyncProvider {
|
||||
fn status(&self) -> SyncStatus {
|
||||
self.status.read().unwrap().clone()
|
||||
}
|
||||
|
||||
fn new_transaction(&self, _raw_transaction: Bytes) {
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -22,8 +22,7 @@ use util::numbers::*;
|
||||
use std::collections::*;
|
||||
|
||||
fn accounts_provider() -> Arc<TestAccountProvider> {
|
||||
let mut accounts = HashMap::new();
|
||||
accounts.insert(Address::from(1), TestAccount::new("test"));
|
||||
let accounts = HashMap::new();
|
||||
let ap = TestAccountProvider::new(accounts);
|
||||
Arc::new(ap)
|
||||
}
|
||||
@ -38,7 +37,11 @@ fn setup() -> (Arc<TestAccountProvider>, IoHandler) {
|
||||
|
||||
#[test]
|
||||
fn accounts() {
|
||||
let (_test_provider, io) = setup();
|
||||
let (test_provider, io) = setup();
|
||||
test_provider.accounts
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(Address::from(1), TestAccount::new("test"));
|
||||
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#;
|
||||
@ -49,11 +52,22 @@ fn accounts() {
|
||||
|
||||
#[test]
|
||||
fn new_account() {
|
||||
let (_test_provider, io) = setup();
|
||||
|
||||
let (test_provider, io) = setup();
|
||||
let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#;
|
||||
let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000002","id":1}"#;
|
||||
|
||||
assert_eq!(io.handle_request(request), Some(response.to_owned()));
|
||||
let res = io.handle_request(request);
|
||||
|
||||
let accounts = test_provider.accounts.read().unwrap();
|
||||
assert_eq!(accounts.len(), 1);
|
||||
|
||||
let address = accounts
|
||||
.keys()
|
||||
.nth(0)
|
||||
.cloned()
|
||||
.unwrap();
|
||||
|
||||
let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{:?}", address).as_ref() + r#"","id":1}"#;
|
||||
|
||||
assert_eq!(res, Some(response));
|
||||
}
|
||||
|
||||
|
@ -190,9 +190,6 @@ pub trait EthFilter: Sized + Send + Sync + 'static {
|
||||
/// Returns filter changes since last poll.
|
||||
fn filter_changes(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
|
||||
|
||||
/// Returns filter logs.
|
||||
fn filter_logs(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
|
||||
|
||||
/// Uninstalls filter.
|
||||
fn uninstall_filter(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
|
||||
|
||||
@ -203,7 +200,7 @@ pub trait EthFilter: Sized + Send + Sync + 'static {
|
||||
delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter);
|
||||
delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter);
|
||||
delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes);
|
||||
delegate.add_method("eth_getFilterLogs", EthFilter::filter_logs);
|
||||
delegate.add_method("eth_getFilterLogs", EthFilter::filter_changes);
|
||||
delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter);
|
||||
delegate
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io"]
|
||||
[dependencies]
|
||||
ethcore-util = { path = "../util" }
|
||||
ethcore = { path = "../ethcore" }
|
||||
clippy = { version = "0.0.54", optional = true }
|
||||
clippy = { version = "0.0.61", optional = true}
|
||||
ethminer = { path = "../miner" }
|
||||
log = "0.3"
|
||||
env_logger = "0.3"
|
||||
|
@ -15,7 +15,7 @@
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
///
|
||||
/// BlockChain synchronization strategy.
|
||||
/// `BlockChain` synchronization strategy.
|
||||
/// Syncs to peers and keeps up to date.
|
||||
/// This implementation uses ethereum protocol v63
|
||||
///
|
||||
@ -127,7 +127,7 @@ pub struct SyncStatus {
|
||||
pub protocol_version: u8,
|
||||
/// The underlying p2p network version.
|
||||
pub network_id: U256,
|
||||
/// BlockChain height for the moment the sync started.
|
||||
/// `BlockChain` height for the moment the sync started.
|
||||
pub start_block_number: BlockNumber,
|
||||
/// Last fully downloaded and imported block number (if any).
|
||||
pub last_imported_block_number: Option<BlockNumber>,
|
||||
@ -217,10 +217,6 @@ pub struct ChainSync {
|
||||
network_id: U256,
|
||||
/// Miner
|
||||
miner: Arc<Miner>,
|
||||
|
||||
/// Transactions to propagate
|
||||
// TODO: reconsider where this is in the codebase - seems a little dodgy to have here.
|
||||
transactions_to_send: Vec<Bytes>,
|
||||
}
|
||||
|
||||
type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
|
||||
@ -247,7 +243,6 @@ impl ChainSync {
|
||||
max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
|
||||
network_id: config.network_id,
|
||||
miner: miner,
|
||||
transactions_to_send: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
@ -950,11 +945,6 @@ impl ChainSync {
|
||||
}
|
||||
}
|
||||
|
||||
/// Place a new transaction on the wire.
|
||||
pub fn new_transaction(&mut self, raw_transaction: Bytes) {
|
||||
self.transactions_to_send.push(raw_transaction);
|
||||
}
|
||||
|
||||
/// Called when peer sends us new transactions
|
||||
fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
|
||||
// accepting transactions once only fully synced
|
||||
@ -1292,15 +1282,20 @@ impl ChainSync {
|
||||
fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize {
|
||||
|
||||
// Early out if there is nobody to send to.
|
||||
if self.peers.len() == 0 {
|
||||
if self.peers.is_empty() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let mut packet = RlpStream::new_list(self.transactions_to_send.len());
|
||||
for tx in self.transactions_to_send.iter() {
|
||||
packet.append_raw(tx, 1);
|
||||
let mut transactions = self.miner.pending_transactions();
|
||||
if transactions.is_empty() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let mut packet = RlpStream::new_list(transactions.len());
|
||||
let tx_count = transactions.len();
|
||||
for tx in transactions.drain(..) {
|
||||
packet.append(&tx);
|
||||
}
|
||||
self.transactions_to_send.clear();
|
||||
let rlp = packet.out();
|
||||
|
||||
let lucky_peers = {
|
||||
@ -1312,20 +1307,19 @@ impl ChainSync {
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// taking at max of MAX_PEERS_PROPAGATION
|
||||
lucky_peers.iter().map(|&id| id.clone()).take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::<Vec<PeerId>>()
|
||||
lucky_peers.iter().cloned().take(min(lucky_peers.len(), MAX_PEERS_PROPAGATION)).collect::<Vec<PeerId>>()
|
||||
};
|
||||
|
||||
let sent = lucky_peers.len();
|
||||
for peer_id in lucky_peers {
|
||||
self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp.clone());
|
||||
}
|
||||
trace!(target: "sync", "Sent {} transactions to {} peers.", tx_count, sent);
|
||||
sent
|
||||
}
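A standalone sketch of the peer-capping step above; the MAX_PEERS_PROPAGATION value of 128 is assumed here purely for illustration:

use std::cmp::min;

const MAX_PEERS_PROPAGATION: usize = 128; // assumed value for this sketch

fn lucky_peers(candidates: &[usize]) -> Vec<usize> {
    // take at most MAX_PEERS_PROPAGATION of the peers that have not seen the txs yet
    candidates.iter().cloned().take(min(candidates.len(), MAX_PEERS_PROPAGATION)).collect()
}

fn main() {
    let peers: Vec<usize> = (0..200).collect();
    assert_eq!(lucky_peers(&peers).len(), 128);
    assert_eq!(lucky_peers(&peers[..5]).len(), 5);
}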
|
||||
|
||||
fn propagate_latest_blocks(&mut self, io: &mut SyncIo) {
|
||||
if !self.transactions_to_send.is_empty() {
|
||||
self.propagate_new_transactions(io);
|
||||
}
|
||||
self.propagate_new_transactions(io);
|
||||
let chain_info = io.chain().chain_info();
|
||||
if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
|
||||
let blocks = self.propagate_blocks(&chain_info, io);
|
||||
@ -1701,8 +1695,8 @@ mod tests {
|
||||
let retracted_blocks = vec![client.block_hash_delta_minus(1)];
|
||||
|
||||
// Add some balance to clients
|
||||
for h in vec![good_blocks[0], retracted_blocks[0]] {
|
||||
let block = client.block(BlockId::Hash(h)).unwrap();
|
||||
for h in &[good_blocks[0], retracted_blocks[0]] {
|
||||
let block = client.block(BlockId::Hash(*h)).unwrap();
|
||||
let view = BlockView::new(&block);
|
||||
client.set_balance(view.transactions()[0].sender().unwrap(), U256::from(1_000_000_000));
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ use std::ops::*;
|
||||
use std::sync::*;
|
||||
use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId};
|
||||
use util::TimerToken;
|
||||
use util::{U256, Bytes, ONE_U256};
|
||||
use util::{U256, ONE_U256};
|
||||
use ethcore::client::Client;
|
||||
use ethcore::service::SyncMessage;
|
||||
use ethminer::Miner;
|
||||
@ -101,9 +101,6 @@ impl Default for SyncConfig {
|
||||
pub trait SyncProvider: Send + Sync {
|
||||
/// Get sync status
|
||||
fn status(&self) -> SyncStatus;
|
||||
|
||||
/// Note that a user has submitted a new transaction.
|
||||
fn new_transaction(&self, raw_transaction: Bytes);
|
||||
}
|
||||
|
||||
/// Ethereum network protocol handler
|
||||
@ -143,11 +140,6 @@ impl SyncProvider for EthSync {
|
||||
fn status(&self) -> SyncStatus {
|
||||
self.sync.read().unwrap().status()
|
||||
}
|
||||
|
||||
/// Note that a user has submitted a new transaction.
|
||||
fn new_transaction(&self, raw_transaction: Bytes) {
|
||||
self.sync.write().unwrap().new_transaction(raw_transaction);
|
||||
}
|
||||
}
|
||||
|
||||
impl NetworkProtocolHandler<SyncMessage> for EthSync {
|
||||
|
1
test.sh
@ -7,6 +7,7 @@ cargo test --features ethcore/json-tests $1 \
|
||||
-p ethcore \
|
||||
-p ethsync \
|
||||
-p ethcore-rpc \
|
||||
-p ethcore-webapp \
|
||||
-p parity \
|
||||
-p ethminer \
|
||||
-p bigint
|
||||
|
@ -27,7 +27,7 @@ crossbeam = "0.2"
|
||||
slab = "0.1"
|
||||
sha3 = { path = "sha3" }
|
||||
serde = "0.7.0"
|
||||
clippy = { version = "0.0.54", optional = true }
|
||||
clippy = { version = "0.0.61", optional = true}
|
||||
json-tests = { path = "json-tests" }
|
||||
igd = "0.4.2"
|
||||
ethcore-devtools = { path = "../devtools" }
|
||||
|
@ -157,6 +157,7 @@ impl KeyPair {
|
||||
}
|
||||
|
||||
/// EC functions
|
||||
#[cfg_attr(feature="dev", allow(similar_names))]
|
||||
pub mod ec {
|
||||
use numbers::*;
|
||||
use standard::*;
|
||||
@ -193,6 +194,7 @@ pub mod ec {
|
||||
}
|
||||
Ok(signature)
|
||||
}
|
||||
|
||||
/// Verify signature.
|
||||
pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> {
|
||||
use secp256k1::*;
|
||||
@ -233,6 +235,7 @@ pub mod ec {
|
||||
}
|
||||
|
||||
/// ECDH functions
|
||||
#[cfg_attr(feature="dev", allow(similar_names))]
|
||||
pub mod ecdh {
|
||||
use crypto::*;
|
||||
use crypto::{self};
|
||||
@ -254,6 +257,7 @@ pub mod ecdh {
|
||||
}
|
||||
|
||||
/// ECIES function
|
||||
#[cfg_attr(feature="dev", allow(similar_names))]
|
||||
pub mod ecies {
|
||||
use hash::*;
|
||||
use bytes::*;
|
||||
|
@ -392,7 +392,7 @@ macro_rules! impl_hash {
|
||||
}
|
||||
}
|
||||
|
||||
/// BitOr on references
|
||||
/// `BitOr` on references
|
||||
impl<'a> BitOr for &'a $from {
|
||||
type Output = $from;
|
||||
|
||||
@ -408,7 +408,7 @@ macro_rules! impl_hash {
|
||||
}
|
||||
}
|
||||
|
||||
/// Moving BitOr
|
||||
/// Moving `BitOr`
|
||||
impl BitOr for $from {
|
||||
type Output = $from;
|
||||
|
||||
@ -417,7 +417,7 @@ macro_rules! impl_hash {
|
||||
}
|
||||
}
|
||||
|
||||
/// BitAnd on references
|
||||
/// `BitAnd` on references
|
||||
impl <'a> BitAnd for &'a $from {
|
||||
type Output = $from;
|
||||
|
||||
@ -433,7 +433,7 @@ macro_rules! impl_hash {
|
||||
}
|
||||
}
|
||||
|
||||
/// Moving BitAnd
|
||||
/// Moving `BitAnd`
|
||||
impl BitAnd for $from {
|
||||
type Output = $from;
|
||||
|
||||
@ -442,7 +442,7 @@ macro_rules! impl_hash {
|
||||
}
|
||||
}
|
||||
|
||||
/// BitXor on references
|
||||
/// `BitXor` on references
|
||||
impl <'a> BitXor for &'a $from {
|
||||
type Output = $from;
|
||||
|
||||
@ -458,7 +458,7 @@ macro_rules! impl_hash {
|
||||
}
|
||||
}
|
||||
|
||||
/// Moving BitXor
|
||||
/// Moving `BitXor`
|
||||
impl BitXor for $from {
|
||||
type Output = $from;
|
||||
|
||||
|
@ -376,8 +376,10 @@ impl<Message> IoService<Message> where Message: Send + Sync + Clone + 'static {
|
||||
|
||||
impl<Message> Drop for IoService<Message> where Message: Send + Sync + Clone {
|
||||
fn drop(&mut self) {
|
||||
trace!(target: "shutdown", "[IoService] Closing...");
|
||||
self.host_channel.send(IoMessage::Shutdown).unwrap();
|
||||
self.thread.take().unwrap().join().ok();
|
||||
trace!(target: "shutdown", "[IoService] Closed.");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -120,10 +120,12 @@ impl Worker {
|
||||
|
||||
impl Drop for Worker {
|
||||
fn drop(&mut self) {
|
||||
trace!(target: "shutdown", "[IoWorker] Closing...");
|
||||
let _ = self.wait_mutex.lock();
|
||||
self.deleting.store(true, AtomicOrdering::Release);
|
||||
self.wait.notify_all();
|
||||
let thread = mem::replace(&mut self.thread, None).unwrap();
|
||||
thread.join().ok();
|
||||
trace!(target: "shutdown", "[IoWorker] Closed");
|
||||
}
|
||||
}
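The Drop change above follows the usual graceful-shutdown shape: take the wait lock, set the stop flag, wake the worker, then join its thread. A self-contained sketch of the same idea using the standard Mutex<bool>/Condvar pairing (illustrative names, not the real Worker):

use std::sync::{Arc, Mutex, Condvar};
use std::thread;

struct ShutdownWorker {
    stop: Arc<(Mutex<bool>, Condvar)>,
    thread: Option<thread::JoinHandle<()>>,
}

impl ShutdownWorker {
    fn spawn() -> ShutdownWorker {
        let stop = Arc::new((Mutex::new(false), Condvar::new()));
        let pair = stop.clone();
        let handle = thread::spawn(move || {
            let (ref lock, ref cvar) = *pair;
            let mut stopping = lock.lock().unwrap();
            // park until Drop flips the flag and wakes us; re-check under the lock
            while !*stopping {
                stopping = cvar.wait(stopping).unwrap();
            }
        });
        ShutdownWorker { stop: stop, thread: Some(handle) }
    }
}

impl Drop for ShutdownWorker {
    fn drop(&mut self) {
        let (ref lock, ref cvar) = *self.stop;
        *lock.lock().unwrap() = true; // set the flag under the lock...
        cvar.notify_all();            // ...then wake the worker
        self.thread.take().unwrap().join().ok();
    }
}

fn main() {
    let worker = ShutdownWorker::spawn();
    drop(worker); // exercises the graceful-shutdown path; main returns promptly
}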
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Disk-backed HashDB implementation.
|
||||
//! Disk-backed `HashDB` implementation.
|
||||
|
||||
use common::*;
|
||||
use rlp::*;
|
||||
@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
|
||||
#[cfg(test)]
|
||||
use std::env;
|
||||
|
||||
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
|
||||
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
|
||||
/// and latent-removal semantics.
|
||||
///
|
||||
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
|
||||
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
|
||||
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
|
||||
/// the removals actually take effect.
|
||||
pub struct ArchiveDB {
|
||||
@ -176,6 +176,7 @@ impl JournalDB for ArchiveDB {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||
|
||||
use common::*;
|
||||
use super::*;
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Disk-backed HashDB implementation.
|
||||
//! Disk-backed `HashDB` implementation.
|
||||
|
||||
use common::*;
|
||||
use rlp::*;
|
||||
@ -53,11 +53,11 @@ enum RemoveFrom {
|
||||
Archive,
|
||||
}
|
||||
|
||||
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
|
||||
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
|
||||
/// and latent-removal semantics.
|
||||
///
|
||||
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
|
||||
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
|
||||
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
|
||||
/// the removals actually take effect.
|
||||
pub struct EarlyMergeDB {
|
||||
@ -528,6 +528,7 @@ impl JournalDB for EarlyMergeDB {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||
|
||||
use common::*;
|
||||
use super::*;
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! JournalDB interface and implementation.
|
||||
//! `JournalDB` interface and implementation.
|
||||
|
||||
use common::*;
|
||||
|
||||
@ -25,7 +25,7 @@ mod earlymergedb;
|
||||
mod overlayrecentdb;
|
||||
mod refcounteddb;
|
||||
|
||||
/// Export the JournalDB trait.
|
||||
/// Export the `JournalDB` trait.
|
||||
pub use self::traits::JournalDB;
|
||||
|
||||
/// A journal database algorithm.
|
||||
@ -70,7 +70,7 @@ impl fmt::Display for Algorithm {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new JournalDB trait object.
|
||||
/// Create a new `JournalDB` trait object.
|
||||
pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
|
||||
match algorithm {
|
||||
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! JournalDB over in-memory overlay
|
||||
//! `JournalDB` over in-memory overlay
|
||||
|
||||
use common::*;
|
||||
use rlp::*;
|
||||
@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
|
||||
use std::env;
|
||||
use super::JournalDB;
|
||||
|
||||
/// Implementation of the JournalDB trait for a disk-backed database with a memory overlay
|
||||
/// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay
|
||||
/// and, possibly, latent-removal semantics.
|
||||
///
|
||||
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
|
||||
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
|
||||
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
|
||||
/// the removals actually take effect.
|
||||
///
|
||||
@ -359,6 +359,7 @@ impl HashDB for OverlayRecentDB {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||
|
||||
use common::*;
|
||||
use super::*;
|
||||
|
@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Disk-backed, ref-counted JournalDB implementation.
|
||||
//! Disk-backed, ref-counted `JournalDB` implementation.
|
||||
|
||||
use common::*;
|
||||
use rlp::*;
|
||||
@ -25,11 +25,11 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
|
||||
#[cfg(test)]
|
||||
use std::env;
|
||||
|
||||
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
|
||||
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
|
||||
/// and latent-removal semantics.
|
||||
///
|
||||
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
|
||||
/// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to
|
||||
/// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect
|
||||
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
|
||||
/// the removals actually take effect.
|
||||
pub struct RefCountedDB {
|
||||
@ -195,6 +195,7 @@ impl JournalDB for RefCountedDB {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#![cfg_attr(feature="dev", allow(blacklisted_name))]
|
||||
#![cfg_attr(feature="dev", allow(similar_names))]
|
||||
|
||||
use common::*;
|
||||
use super::*;
|
||||
|
@ -14,12 +14,12 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Disk-backed HashDB implementation.
|
||||
//! Disk-backed `HashDB` implementation.
|
||||
|
||||
use common::*;
|
||||
use hashdb::*;
|
||||
|
||||
/// A HashDB which can manage a short-term journal potentially containing many forks of mutually
|
||||
/// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
|
||||
/// exclusive actions.
|
||||
pub trait JournalDB : HashDB + Send + Sync {
|
||||
/// Return a copy of ourself, in a box.
|
||||
|
@ -326,7 +326,7 @@ fn uuid_from_string(s: &str) -> Result<Uuid, UtilError> {
|
||||
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Stored key file struct with encrypted message (cipher_text)
|
||||
/// Stored key file struct with encrypted message (`cipher_text`)
|
||||
/// also contains password derivation function settings (PBKDF2/Scrypt)
|
||||
pub struct KeyFileContent {
|
||||
version: KeyFileVersion,
|
||||
@ -369,9 +369,9 @@ enum KeyFileParseError {
|
||||
}
|
||||
|
||||
impl KeyFileContent {
|
||||
/// New stored key file struct with encrypted message (cipher_text)
|
||||
/// New stored key file struct with encrypted message (`cipher_text`)
|
||||
/// also contains password derivation function settings (PBKDF2/Scrypt)
|
||||
/// to decrypt cipher_text given the password is provided.
|
||||
/// to decrypt `cipher_text` given the password is provided.
|
||||
pub fn new(crypto: KeyFileCrypto) -> KeyFileContent {
|
||||
KeyFileContent {
|
||||
id: new_uuid(),
|
||||
|
@ -128,7 +128,7 @@ impl Default for AccountService {
|
||||
}
|
||||
|
||||
impl AccountService {
|
||||
/// New account service with the default location
|
||||
/// New account service with the key store in the default location
|
||||
pub fn new() -> Self {
|
||||
let secret_store = RwLock::new(SecretStore::new());
|
||||
secret_store.write().unwrap().try_import_existing();
|
||||
@ -137,6 +137,15 @@ impl AccountService {
|
||||
}
|
||||
}
|
||||
|
||||
/// New account service with the key store in a specific location
|
||||
pub fn new_in(path: &Path) -> Self {
|
||||
let secret_store = RwLock::new(SecretStore::new_in(path));
|
||||
secret_store.write().unwrap().try_import_existing();
|
||||
AccountService {
|
||||
secret_store: secret_store
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn new_test(temp: &::devtools::RandomTempPath) -> Self {
|
||||
let secret_store = RwLock::new(SecretStore::new_test(temp));
|
||||
|
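A brief usage sketch of the two constructors above; the import path of AccountService and the chosen directory are assumptions for illustration, so read this as a sketch rather than the crate's documented API.

use std::path::Path;

// Illustrative only: assumes `AccountService` is in scope, e.g. via
// `use util::keys::store::AccountService;` (the exact path is an assumption).
fn open_key_stores() {
    // Key store in the default location.
    let _default = AccountService::new();
    // Key store in a caller-chosen directory (the path here is made up).
    let _custom = AccountService::new_in(Path::new("/tmp/parity-keys"));
}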
@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Key-Value store abstraction with RocksDB backend.
//! Key-Value store abstraction with `RocksDB` backend.

use std::default::Default;
use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator,

@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Reference-counted memory-based HashDB implementation.
//! Reference-counted memory-based `HashDB` implementation.

use hash::*;
use bytes::*;
@ -27,7 +27,7 @@ use std::collections::HashMap;
use std::default::Default;

#[derive(Debug,Clone)]
/// Reference-counted memory-based HashDB implementation.
/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `contains()` and lookup a hash to derive

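To make the reference-counting semantics concrete, here is a self-contained toy (not the crate's MemoryDB: it keys entries by the value itself instead of by its SHA3, and the method set is reduced to the essentials mentioned in the doc comment above).

use std::collections::HashMap;

/// Toy reference-counted store: inserting the same value twice means it takes
/// two removals before the entry actually disappears.
struct ToyMemoryDB {
    data: HashMap<Vec<u8>, (Vec<u8>, i32)>, // key -> (value, reference count)
}

impl ToyMemoryDB {
    fn new() -> Self {
        ToyMemoryDB { data: HashMap::new() }
    }

    fn insert(&mut self, value: &[u8]) -> Vec<u8> {
        let key = value.to_vec(); // the real MemoryDB would use the value's SHA3 here
        let entry = self.data.entry(key.clone()).or_insert((value.to_vec(), 0));
        entry.1 += 1;
        key
    }

    fn remove(&mut self, key: &[u8]) {
        if let Some(entry) = self.data.get_mut(key) {
            entry.1 -= 1;
        }
        // Purge entries whose reference count has dropped to zero.
        self.data.retain(|_, entry| entry.1 > 0);
    }

    fn contains(&self, key: &[u8]) -> bool {
        self.data.get(key).map_or(false, |entry| entry.1 > 0)
    }
}

fn main() {
    let mut db = ToyMemoryDB::new();
    let key = db.insert(b"dog");
    db.insert(b"dog");           // second insert bumps the reference count to 2
    db.remove(&key);
    assert!(db.contains(&key));  // still present: one reference left
    db.remove(&key);
    assert!(!db.contains(&key)); // gone once the last reference is removed
}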
@ -223,7 +223,7 @@ pub enum WriteStatus {
Complete
}

/// RLPx packet
/// `RLPx` packet
pub struct Packet {
pub protocol: u16,
pub data: Bytes,
@ -237,7 +237,7 @@ enum EncryptedConnectionState {
Payload,
}

/// Connection implementing RLPx framing
/// Connection implementing `RLPx` framing
/// https://github.com/ethereum/devp2p/blob/master/rlpx.md#framing
pub struct EncryptedConnection {
/// Underlying tcp connection

@ -48,7 +48,7 @@ enum HandshakeState {
StartSession,
}

/// RLPx protocol handshake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
/// `RLPx` protocol handshake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
pub struct Handshake {
/// Remote node public key
pub id: NodeId,
@ -66,11 +66,11 @@ pub struct Handshake {
pub remote_ephemeral: Public,
/// Remote connection nonce.
pub remote_nonce: H256,
/// Remote RLPx protocol version.
/// Remote `RLPx` protocol version.
pub remote_version: u64,
/// A copy of the received encrypted auth packet
pub auth_cipher: Bytes,
/// A copy of the received encrypted ack packet
pub ack_cipher: Bytes,
/// Flag marking this handshake for deletion
pub expired: bool,
@ -413,7 +413,7 @@ mod test {
fn test_handshake_auth_plain() {
let mut h = create_handshake(None);
let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap();
let auth =
"\
048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf\
913150cfc777ce0ce4af2758bf4810235f6e6ceccfee1acc6b22c005e9e3a49d6448610a58e98744\
@ -434,7 +434,7 @@ mod test {
fn test_handshake_auth_eip8() {
let mut h = create_handshake(None);
let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap();
let auth =
"\
01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b\
0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b1593f0d84ac74f6e475f1b8d56116b84\
@ -460,7 +460,7 @@ mod test {
fn test_handshake_auth_eip8_2() {
let mut h = create_handshake(None);
let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap();
let auth =
"\
01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7\
2471f93a91b493f8e00abc4b80f682973de715d77ba3a005a242eb859f9a211d93a347fa64b597bf\
@ -481,7 +481,7 @@ mod test {
h.read_auth_eip8(&secret, &auth[super::V4_AUTH_PACKET_SIZE..]).unwrap();
assert_eq!(h.state, super::HandshakeState::StartSession);
check_auth(&h, 56);
let ack = h.ack_cipher.clone();
let total = (((ack[0] as u16) << 8 | (ack[1] as u16)) as usize) + 2;
assert_eq!(ack.len(), total);
}
@ -491,7 +491,7 @@ mod test {
let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap();
let mut h = create_handshake(Some(&remote));
let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap();
let ack =
"\
049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662\
b0ff2c08e9006d5a11a278b1b3331e5aaabf0a32f01281b6f4ede0e09a2d5f585b26513cb794d963\
@ -511,7 +511,7 @@ mod test {
let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap();
let mut h = create_handshake(Some(&remote));
let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap();
let ack =
"\
01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470\
b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de\
@ -540,7 +540,7 @@ mod test {
let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap();
let mut h = create_handshake(Some(&remote));
let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap();
let ack =
"\
01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7\
ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0\

@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Disk-backed HashDB implementation.
//! Disk-backed `HashDB` implementation.

use error::*;
use hash::*;
@ -28,7 +28,7 @@ use std::env;
use std::collections::HashMap;
use kvdb::{Database, DBTransaction};

/// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
/// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay.
///
/// The operations `insert()` and `remove()` take place on the memory overlay; batches of
/// such operations may be flushed to the disk-backed DB with `commit()` or discarded with

@ -153,7 +153,7 @@ impl <T>ToBytes for T where T: FixedHash {
fn to_bytes_len(&self) -> usize { self.bytes().len() }
}

/// Error returned when FromBytes conversion goes wrong
/// Error returned when `FromBytes` conversion goes wrong
#[derive(Debug, PartialEq, Eq)]
pub enum FromBytesError {
/// Expected more RLP data
@ -174,7 +174,7 @@ impl fmt::Display for FromBytesError {
}
}

/// Alias for the result of FromBytes trait
/// Alias for the result of `FromBytes` trait
pub type FromBytesResult<T> = Result<T, FromBytesError>;

/// Converts to given type from its bytes representation

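For readers new to the pattern, a self-contained sketch of a FromBytes-style conversion with a typed error and a result alias follows; the names mirror the ones above but the implementation is illustrative, not the crate's actual code.

use std::fmt;

/// Error returned when a bytes-to-value conversion goes wrong.
#[derive(Debug, PartialEq, Eq)]
enum FromBytesError {
    /// Expected more data than was supplied.
    DataIsTooShort,
}

impl fmt::Display for FromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "data is too short")
    }
}

/// Alias for the result of a `FromBytes`-style conversion.
type FromBytesResult<T> = Result<T, FromBytesError>;

/// Converts to the given type from its big-endian bytes representation.
trait FromBytes: Sized {
    fn from_bytes(bytes: &[u8]) -> FromBytesResult<Self>;
}

impl FromBytes for u32 {
    fn from_bytes(bytes: &[u8]) -> FromBytesResult<u32> {
        if bytes.len() < 4 {
            return Err(FromBytesError::DataIsTooShort);
        }
        Ok(bytes[..4].iter().fold(0u32, |acc, &b| (acc << 8) | b as u32))
    }
}

fn main() {
    assert_eq!(u32::from_bytes(&[0, 0, 1, 0]), Ok(256));
    assert_eq!(u32::from_bytes(&[1]), Err(FromBytesError::DataIsTooShort));
}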
@ -22,8 +22,8 @@ use super::triedb::*;
use super::trietraits::*;

/// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
///
/// Use it as a `Trie` trait object. You can use `raw()` to get the backing TrieDB object.
///
/// Use it as a `Trie` trait object. You can use `raw()` to get the backing `TrieDB` object.
pub struct SecTrieDB<'db> {
raw: TrieDB<'db>
}
@ -32,16 +32,16 @@ impl<'db> SecTrieDB<'db> {
/// Create a new trie with the backing database `db` and empty `root`
/// Initialise to the state entailed by the genesis block.
/// This guarantees the trie is built correctly.
pub fn new(db: &'db HashDB, root: &'db H256) -> Self {
SecTrieDB { raw: TrieDB::new(db, root) }
}

/// Get a reference to the underlying raw TrieDB struct.
/// Get a reference to the underlying raw `TrieDB` struct.
pub fn raw(&self) -> &TrieDB {
&self.raw
}

/// Get a mutable reference to the underlying raw TrieDB struct.
/// Get a mutable reference to the underlying raw `TrieDB` struct.
pub fn raw_mut(&mut self) -> &TrieDB {
&mut self.raw
}

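The "Sec" wrapper's whole job is to hash every key before it reaches the underlying trie. A self-contained toy of that wrapping pattern follows, using a HashMap and Rust's DefaultHasher where the real SecTrieDB uses a TrieDB and a SHA3 key hash.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

/// Stand-in for the key hash (the real SecTrieDB hashes keys with SHA3).
fn hash_key(key: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    hasher.finish()
}

/// Toy "secure" wrapper: callers use plain keys, the backing store only ever
/// sees their hashes, mirroring how SecTrieDB wraps a TrieDB.
struct ToySecTrie {
    raw: HashMap<u64, Vec<u8>>, // stands in for the backing TrieDB
}

impl ToySecTrie {
    fn new() -> Self {
        ToySecTrie { raw: HashMap::new() }
    }

    fn insert(&mut self, key: &[u8], value: &[u8]) {
        self.raw.insert(hash_key(key), value.to_vec());
    }

    fn get(&self, key: &[u8]) -> Option<&Vec<u8>> {
        self.raw.get(&hash_key(key))
    }

    /// Direct access to the backing store, like `raw()` above.
    fn raw(&self) -> &HashMap<u64, Vec<u8>> {
        &self.raw
    }
}

fn main() {
    let mut trie = ToySecTrie::new();
    trie.insert(b"foo", b"bar");
    assert_eq!(trie.get(b"foo").map(|v| v.as_slice()), Some(&b"bar"[..]));
    // The backing store is keyed by the hash, not by the original key bytes.
    assert!(trie.raw().contains_key(&hash_key(b"foo")));
}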
@ -22,8 +22,8 @@ use super::triedbmut::*;
use super::trietraits::*;

/// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
///
/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing TrieDBMut object.
///
/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing `TrieDBMut` object.
pub struct SecTrieDBMut<'db> {
raw: TrieDBMut<'db>
}
@ -32,7 +32,7 @@ impl<'db> SecTrieDBMut<'db> {
/// Create a new trie with the backing database `db` and empty `root`
/// Initialise to the state entailed by the genesis block.
/// This guarantees the trie is built correctly.
pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
SecTrieDBMut { raw: TrieDBMut::new(db, root) }
}

26
webapp/Cargo.toml
Normal file
@ -0,0 +1,26 @@
[package]
description = "Parity WebApplications crate"
name = "ethcore-webapp"
version = "1.1.0"
license = "GPL-3.0"
authors = ["Ethcore <admin@ethcore.io>"]

[lib]

[dependencies]
log = "0.3"
jsonrpc-core = "2.0"
jsonrpc-http-server = { git = "https://github.com/tomusdrw/jsonrpc-http-server.git", branch="old-hyper" }
hyper = { version = "0.8", default-features = false }
iron = { version = "0.3" }
ethcore-rpc = { path = "../rpc" }
ethcore-util = { path = "../util" }
parity-webapp = { git = "https://github.com/tomusdrw/parity-webapp.git" }
# List of apps
parity-status = { git = "https://github.com/tomusdrw/parity-status.git", version = "0.1.4" }
parity-wallet = { git = "https://github.com/tomusdrw/parity-wallet.git", optional = true }
clippy = { version = "0.0.61", optional = true }

[features]
default = ["parity-wallet"]
dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"]
41
webapp/src/apps.rs
Normal file
@ -0,0 +1,41 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::HashMap;
use page::{Page, PageHandler};

extern crate parity_status;
extern crate parity_wallet;

pub type Pages = HashMap<String, Box<Page>>;

pub fn main_page() -> Box<Page> {
Box::new(PageHandler { app: parity_status::App::default() })
}

pub fn all_pages() -> Pages {
let mut pages = Pages::new();
wallet_page(&mut pages);
pages
}

#[cfg(feature = "parity-wallet")]
fn wallet_page(pages: &mut Pages) {
pages.insert("wallet".to_owned(), Box::new(PageHandler { app: parity_wallet::App::default() }));
}

#[cfg(not(feature = "parity-wallet"))]
fn wallet_page(_pages: &mut Pages) {}
99
webapp/src/lib.rs
Normal file
@ -0,0 +1,99 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Ethcore web applications for Parity
#![warn(missing_docs)]
#![cfg_attr(feature="nightly", plugin(clippy))]

#[macro_use]
extern crate log;
extern crate hyper;
extern crate iron;
extern crate jsonrpc_core;
extern crate jsonrpc_http_server;
extern crate ethcore_rpc as rpc;
extern crate parity_webapp;

use std::sync::Arc;
use self::jsonrpc_core::{IoHandler, IoDelegate};
use jsonrpc_http_server::ServerHandler;

mod apps;
mod page;
mod router;

/// Http server.
pub struct WebappServer {
handler: Arc<IoHandler>,
}

impl WebappServer {
/// Construct a new http server object.
pub fn new() -> Self {
WebappServer {
handler: Arc::new(IoHandler::new()),
}
}

/// Add io delegate.
pub fn add_delegate<D>(&self, delegate: IoDelegate<D>) where D: Send + Sync + 'static {
self.handler.add_delegate(delegate);
}

/// Start the server asynchronously and return a `Listening` handle on success, or an error.
pub fn start_http(&self, addr: &str, threads: usize) -> Result<Listening, WebappServerError> {
let addr = addr.to_owned();
let handler = self.handler.clone();

let cors_domain = jsonrpc_http_server::AccessControlAllowOrigin::Null;
let rpc = ServerHandler::new(handler, cors_domain);
let router = router::Router::new(rpc, apps::main_page(), apps::all_pages());

try!(hyper::Server::http(addr.as_ref() as &str))
.handle_threads(router, threads)
.map(|l| Listening { listening: l })
.map_err(WebappServerError::from)
}
}

/// Listening handle
pub struct Listening {
listening: hyper::server::Listening
}

impl Drop for Listening {
fn drop(&mut self) {
self.listening.close().unwrap();
}
}

/// Webapp Server startup error
#[derive(Debug)]
pub enum WebappServerError {
/// Wrapped `std::io::Error`
IoError(std::io::Error),
/// Other `hyper` error
Other(hyper::error::Error),
}

impl From<hyper::error::Error> for WebappServerError {
fn from(err: hyper::error::Error) -> Self {
match err {
hyper::error::Error::Io(e) => WebappServerError::IoError(e),
e => WebappServerError::Other(e),
}
}
}
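A usage sketch of the server API above; it assumes the crate is linked as ethcore_webapp and that the caller has an IoDelegate to expose, so read it as a sketch rather than canonical usage.

extern crate ethcore_webapp as webapp;

fn run_webapp_server() -> Result<webapp::Listening, webapp::WebappServerError> {
    let server = webapp::WebappServer::new();
    // server.add_delegate(some_io_delegate); // any IoDelegate<D> the caller owns
    // Bind on localhost with four worker threads; keep the returned handle alive,
    // since dropping `Listening` closes the socket (see the `Drop` impl above).
    server.start_http("127.0.0.1:8080", 4)
}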
67
webapp/src/page/mod.rs
Normal file
@ -0,0 +1,67 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::io::Write;
use hyper::uri::RequestUri;
use hyper::server;
use hyper::header;
use hyper::status::StatusCode;
use parity_webapp::WebApp;

pub trait Page : Send + Sync {
fn serve_file(&self, mut path: &str, mut res: server::Response);
}

pub struct PageHandler<T : WebApp> {
pub app: T,
}

impl<T: WebApp> Page for PageHandler<T> {
fn serve_file(&self, mut path: &str, mut res: server::Response) {
// Support index file
if path == "" {
path = "index.html"
}
let file = self.app.file(path);
if let Some(f) = file {
*res.status_mut() = StatusCode::Ok;
res.headers_mut().set(header::ContentType(f.content_type.parse().unwrap()));

let _ = match res.start() {
Ok(mut raw_res) => {
for chunk in f.content.chunks(1024 * 20) {
let _ = raw_res.write(chunk);
}
raw_res.end()
},
Err(_) => {
println!("Error while writing response.");
Ok(())
},
};
}
}
}

impl server::Handler for Page {
fn handle(&self, req: server::Request, mut res: server::Response) {
*res.status_mut() = StatusCode::NotFound;

if let RequestUri::AbsolutePath(ref path) = req.uri {
self.serve_file(path, res);
}
}
}
53
webapp/src/router/api.rs
Normal file
@ -0,0 +1,53 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Simple REST API

use std::sync::Arc;
use hyper;
use hyper::status::StatusCode;
use hyper::header;
use hyper::uri::RequestUri::AbsolutePath as Path;
use apps::Pages;

pub struct RestApi {
pub pages: Arc<Pages>,
}

impl RestApi {
fn list_pages(&self) -> String {
let mut s = "[".to_owned();
for name in self.pages.keys() {
s.push_str(&format!("\"{}\",", name));
}
s.push_str("\"rpc\"");
s.push_str("]");
s
}
}

impl hyper::server::Handler for RestApi {
fn handle<'b, 'a>(&'a self, req: hyper::server::Request<'a, 'b>, mut res: hyper::server::Response<'a>) {
match req.uri {
Path(ref path) if path == "apps" => {
*res.status_mut() = StatusCode::Ok;
res.headers_mut().set(header::ContentType("application/json".parse().unwrap()));
let _ = res.send(self.list_pages().as_bytes());
},
_ => (),
}
}
}
109
webapp/src/router/mod.rs
Normal file
@ -0,0 +1,109 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Router implementation

use std::sync::Arc;
use hyper;
use page::Page;
use apps::Pages;
use iron::request::Url;
use jsonrpc_http_server::ServerHandler;

mod api;

pub struct Router {
rpc: ServerHandler,
api: api::RestApi,
main_page: Box<Page>,
pages: Arc<Pages>,
}

impl hyper::server::Handler for Router {
fn handle<'b, 'a>(&'a self, req: hyper::server::Request<'a, 'b>, res: hyper::server::Response<'a>) {
let (path, req) = Router::extract_request_path(req);
match path {
Some(ref url) if self.pages.contains_key(url) => {
self.pages.get(url).unwrap().handle(req, res);
},
Some(ref url) if url == "api" => {
self.api.handle(req, res);
},
_ if req.method == hyper::method::Method::Post => {
self.rpc.handle(req, res)
},
_ => self.main_page.handle(req, res),
}
}
}

impl Router {
pub fn new(rpc: ServerHandler, main_page: Box<Page>, pages: Pages) -> Self {
let pages = Arc::new(pages);
Router {
rpc: rpc,
api: api::RestApi { pages: pages.clone() },
main_page: main_page,
pages: pages,
}
}

fn extract_url(req: &hyper::server::Request) -> Option<Url> {
match req.uri {
hyper::uri::RequestUri::AbsoluteUri(ref url) => {
match Url::from_generic_url(url.clone()) {
Ok(url) => Some(url),
_ => None,
}
},
hyper::uri::RequestUri::AbsolutePath(ref path) => {
// Attempt to prepend the Host header (mandatory in HTTP/1.1)
let url_string = match req.headers.get::<hyper::header::Host>() {
Some(ref host) => {
format!("http://{}:{}{}", host.hostname, host.port.unwrap_or(80), path)
},
None => return None,
};

match Url::parse(&url_string) {
Ok(url) => Some(url),
_ => None,
}
},
_ => None,
}
}

fn extract_request_path<'a, 'b>(mut req: hyper::server::Request<'a, 'b>) -> (Option<String>, hyper::server::Request<'a, 'b>) {
let url = Router::extract_url(&req);
match url {
Some(ref url) if url.path.len() > 1 => {
let part = url.path[0].clone();
let url = url.path[1..].join("/");
req.uri = hyper::uri::RequestUri::AbsolutePath(url);
(Some(part), req)
},
Some(url) => {
let url = url.path.join("/");
req.uri = hyper::uri::RequestUri::AbsolutePath(url);
(None, req)
},
_ => {
(None, req)
},
}
}
}
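To see what extract_request_path does to an incoming request, here is a self-contained sketch of the same splitting rule applied to a plain path string (the real code works through iron's Url and rewrites req.uri, which is omitted here).

/// Split "/app/rest/of/path" into (Some("app"), "rest/of/path"),
/// mirroring how the router picks the page prefix and rewrites the request URI.
fn split_request_path(path: &str) -> (Option<String>, String) {
    let segments: Vec<&str> = path.trim_matches('/').split('/').collect();
    if segments.len() > 1 {
        (Some(segments[0].to_owned()), segments[1..].join("/"))
    } else {
        (None, segments.join("/"))
    }
}

fn main() {
    // "wallet" selects the wallet page, which is then asked for "index.html".
    assert_eq!(split_request_path("/wallet/index.html"),
               (Some("wallet".to_owned()), "index.html".to_owned()));
    // A bare path has no page prefix, so the router falls back to the main page or RPC.
    assert_eq!(split_request_path("/"), (None, "".to_owned()));
}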