mixed merge and changes...

keorn 2016-10-11 18:37:31 +01:00
parent 1f56588b87
commit e343153f06
58 changed files with 1397 additions and 621 deletions

View File

@@ -5,6 +5,7 @@ variables:
   GIT_DEPTH: "3"
   SIMPLECOV: "true"
   RUST_BACKTRACE: "1"
+  RUSTFLAGS: "-D warnings"
 cache:
   key: "$CI_BUILD_NAME/$CI_BUILD_REF_NAME"
   untracked: true
@@ -264,7 +265,7 @@ windows:
     - set INCLUDE=C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Include;C:\vs2015\VC\include;C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt
     - set LIB=C:\vs2015\VC\lib;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64
     - set RUST_BACKTRACE=1
-    - set RUSTFLAGS=-Zorbit=off
+    - set RUSTFLAGS=%RUSTFLAGS% -Zorbit=off
     - rustup default stable-x86_64-pc-windows-msvc
     - cargo build --release --verbose
    - cmd md5sum target\release\parity >> checksum
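Both CI configurations now export RUSTFLAGS="-D warnings", which promotes every rustc lint warning into a hard compile error, so warning-laden code fails the build instead of slipping through. A minimal sketch of the effect (illustrative, not part of the commit):

    // Compiles with a lint warning normally; fails to compile under
    // RUSTFLAGS="-D warnings" because the warning becomes an error.
    fn main() {
        let unused = 42; // warning: unused variable `unused`
    }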

View File

@@ -31,6 +31,7 @@ env:
   - RUN_COVERAGE="false"
   - RUN_DOCS="false"
   - TEST_OPTIONS=""
+  - RUSTFLAGS="-D warnings"
   # GH_TOKEN for documentation
   - secure: bumJASbZSU8bxJ0EyPUJmu16AiV9EXOpyOj86Jlq/Ty9CfwGqsSXt96uDyE+OUJf34RUFQMsw0nk37/zC4lcn6kqk2wpuH3N/o85Zo/cVZY/NusBWLQqtT5VbYWsV+u2Ua4Tmmsw8yVYQhYwU2ZOejNpflL+Cs9XGgORp1L+/gMRMC2y5Se6ZhwnKPQlRJ8LGsG1dzjQULxzADIt3/zuspNBS8a2urJwlHfGMkvHDoUWCviP/GXoSqw3TZR7FmKyxE19I8n9+iSvm9+oZZquvcgfUxMHn8Gq/b44UbPvjtFOg2yam4xdWXF/RyWCHdc/R9EHorSABeCbefIsm+zcUF3/YQxwpSxM4IZEeH2rTiC7dcrsKw3XsO16xFQz5YI5Bay+CT/wTdMmJd7DdYz7Dyf+pOvcM9WOf/zorxYWSBOMYy0uzbusU2iyIghQ82s7E/Ahg+WARtPgkuTLSB5aL1oCTBKHqQscMr7lo5Ti6RpWLxEdTQMBznc+bMr+6dEtkEcG9zqc6cE9XX+ox3wTU6+HVMfQ1ltCntJ4UKcw3A6INEbw9wgocQa812CIASQ2fE+SCAbz6JxBjIAlFUnD1lUB7S8PdMPwn9plfQgKQ2A5YZqg6FnBdf0rQXIJYxQWKHXj/rBHSUCT0tHACDlzTA+EwWggvkP5AGIxRxm8jhw=
   - KCOV_CMD="./kcov-master/tmp/usr/local/bin/kcov"

Cargo.lock generated
View File

@@ -240,7 +240,7 @@ version = "0.5.4"
 source = "git+https://github.com/ethcore/rust-secp256k1#a9a0b1be1f39560ca86e8fc8e55e205a753ff25c"
 dependencies = [
  "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -308,7 +308,7 @@ dependencies = [
 [[package]]
 name = "ethcore-bigint"
-version = "0.1.0"
+version = "0.1.1"
 dependencies = [
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -399,6 +399,7 @@ dependencies = [
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
  "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -538,7 +539,7 @@ dependencies = [
  "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
- "ethcore-bigint 0.1.0",
+ "ethcore-bigint 0.1.1",
  "ethcore-bloom-journal 0.1.0",
  "ethcore-devtools 1.4.0",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -567,7 +568,7 @@ name = "ethcrypto"
 version = "0.1.0"
 dependencies = [
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
- "ethcore-bigint 0.1.0",
+ "ethcore-bigint 0.1.1",
  "ethkey 0.2.0",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -590,7 +591,7 @@ version = "0.2.0"
 dependencies = [
  "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
- "ethcore-bigint 0.1.0",
+ "ethcore-bigint 0.1.1",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -674,8 +675,11 @@ dependencies = [
 [[package]]
 name = "gcc"
-version = "0.3.28"
+version = "0.3.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]

 [[package]]
 name = "glob"
@@ -936,7 +940,7 @@ name = "miniz-sys"
 version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1022,7 +1026,7 @@ name = "nanomsg-sys"
 version = "0.5.0"
 source = "git+https://github.com/ethcore/nanomsg.rs.git#c40fe442c9afaea5b38009a3d992ca044dcceb00"
 dependencies = [
- "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1394,7 +1398,7 @@ name = "rlp"
 version = "0.1.0"
 dependencies = [
  "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-bigint 0.1.0",
+ "ethcore-bigint 0.1.1",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1402,7 +1406,7 @@ dependencies = [
 [[package]]
 name = "rocksdb"
 version = "0.4.5"
-source = "git+https://github.com/ethcore/rust-rocksdb#485dd747a2c9a9f910fc8ac696fc9edf5fa22aa3"
+source = "git+https://github.com/ethcore/rust-rocksdb#ffc7c82380fe8569f85ae6743f7f620af2d4a679"
 dependencies = [
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)",
@@ -1411,9 +1415,9 @@ dependencies = [
 [[package]]
 name = "rocksdb-sys"
 version = "0.3.0"
-source = "git+https://github.com/ethcore/rust-rocksdb#485dd747a2c9a9f910fc8ac696fc9edf5fa22aa3"
+source = "git+https://github.com/ethcore/rust-rocksdb#ffc7c82380fe8569f85ae6743f7f620af2d4a679"
 dependencies = [
- "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1444,7 +1448,7 @@ name = "rust-crypto"
 version = "0.2.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1537,7 +1541,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 name = "sha3"
 version = "0.1.0"
 dependencies = [
- "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1926,7 +1930,7 @@ dependencies = [
 "checksum eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)" = "<none>"
 "checksum ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0c53453517f620847be51943db329276ae52f2e210cfc659e81182864be2f"
 "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb"
-"checksum gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)" = "3da3a2cbaeb01363c8e3704fd9fd0eb2ceb17c6f27abd4c1ef040fb57d20dc79"
+"checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312"
 "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
 "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1"
 "checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c"

View File

@@ -6,7 +6,7 @@ environment:
   certpass:
     secure: 0BgXJqxq9Ei34/hZ7121FQ==
   keyfile: C:\users\appveyor\Certificates.p12
-  RUSTFLAGS: -Zorbit=off
+  RUSTFLAGS: -Zorbit=off -D warnings
 branches:
   only:

View File

@@ -92,11 +92,14 @@ impl server::Handler<net::HttpStream> for RestApiRouter {
 		}

 		let url = url.expect("Check for None early-exists above; qed");
-		let path = self.path.take().expect("on_request called only once, and path is always defined in new; qed");
+		let mut path = self.path.take().expect("on_request called only once, and path is always defined in new; qed");
 		let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed");

 		let endpoint = url.path.get(1).map(|v| v.as_str());
 		let hash = url.path.get(2).map(|v| v.as_str());
+		// at this point path.app_id contains 'api', adjust it to the hash properly, otherwise
+		// we will try and retrieve 'api' as the hash when doing the /api/content route
+		if let Some(hash) = hash.clone() { path.app_id = hash.to_owned() }

 		let handler = endpoint.and_then(|v| match v {
 			"apps" => Some(as_json(&self.api.list_apps())),

View File

@@ -122,7 +122,7 @@ impl<R: URLHint> ContentFetcher<R> {
 			},
 			// We need to start fetching app
 			None => {
-				trace!(target: "dapps", "Content unavailable. Fetching...");
+				trace!(target: "dapps", "Content unavailable. Fetching... {:?}", content_id);
 				let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true.");
 				let content = self.resolver.resolve(content_hex);
@@ -415,4 +415,3 @@ mod tests {
 		assert_eq!(fetcher.contains("test3"), false);
 	}
 }
-

View File

@@ -18,7 +18,7 @@ extern crate ethcore_ipc_codegen;
 fn main() {
 	ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap();
-	ethcore_ipc_codegen::derive_ipc("src/client/traits.rs").unwrap();
-	ethcore_ipc_codegen::derive_ipc("src/snapshot/snapshot_service_trait.rs").unwrap();
-	ethcore_ipc_codegen::derive_ipc("src/client/chain_notify.rs").unwrap();
+	ethcore_ipc_codegen::derive_ipc_cond("src/client/traits.rs", cfg!(feature="ipc")).unwrap();
+	ethcore_ipc_codegen::derive_ipc_cond("src/snapshot/snapshot_service_trait.rs", cfg!(feature="ipc")).unwrap();
+	ethcore_ipc_codegen::derive_ipc_cond("src/client/chain_notify.rs", cfg!(feature="ipc")).unwrap();
 }
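The switch to derive_ipc_cond threads through the result of cfg!(feature="ipc"), so the heavier IPC codegen runs only for builds that enable the feature (the exact fallback behaviour is internal to ethcore_ipc_codegen). Note that cfg! expands to a compile-time constant boolean evaluated while the build script itself is compiled; a minimal sketch of that mechanism (illustrative, not part of the commit):

    // cfg! evaluates to a compile-time bool while build.rs itself is being
    // compiled, so one script serves both feature configurations.
    fn main() {
        let with_ipc: bool = cfg!(feature = "ipc");
        println!("cargo:warning=IPC codegen enabled: {}", with_ipc);
    }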

View File

@@ -149,13 +149,6 @@ pub struct Client {
 /// assume finality of a given candidate.
 pub const HISTORY: u64 = 1200;

-/// Append a path element to the given path and return the string.
-pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
-	let mut p = path.as_ref().to_path_buf();
-	p.push(item);
-	p.to_str().unwrap().to_owned()
-}
-
 impl Client {
 	/// Create a new client with given spec and DB path and custom verifier.
 	pub fn new(
@@ -169,7 +162,7 @@ impl Client {
 		let path = path.to_path_buf();
 		let gb = spec.genesis_block();

-		let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database)));
+		let db = Arc::new(try!(Database::open(&db_config, &path.to_str().expect("DB path could not be converted to string.")).map_err(ClientError::Database)));
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone(), spec.engine.clone()));
 		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
@@ -298,31 +291,27 @@ impl Client {
 		// Check if Parent is in chain
 		let chain_has_parent = chain.block_header(header.parent_hash());
-		if let None = chain_has_parent {
-			warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash());
-			return Err(());
-		};
-
-		// Enact Verified Block
-		let parent = chain_has_parent.unwrap();
-		let last_hashes = self.build_last_hashes(header.parent_hash().clone());
-		let is_canon = header.parent_hash() == &chain.best_block_hash();
-		let db = if is_canon { self.state_db.lock().boxed_clone_canon() } else { self.state_db.lock().boxed_clone() };
-
-		let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
-		if let Err(e) = enact_result {
-			warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
-			return Err(());
-		};
-
-		// Final Verification
-		let locked_block = enact_result.unwrap();
-		if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
-			warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
-			return Err(());
-		}
-
-		Ok(locked_block)
+		if let Some(parent) = chain_has_parent {
+			// Enact Verified Block
+			let last_hashes = self.build_last_hashes(header.parent_hash().clone());
+			let db = self.state_db.lock().boxed_clone_canon(&header.parent_hash());
+
+			let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
+			let locked_block = try!(enact_result.map_err(|e| {
+				warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
+			}));
+
+			// Final Verification
+			if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
+				warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
+				return Err(());
+			}
+
+			Ok(locked_block)
+		} else {
+			warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash());
+			Err(())
+		}
 	}

 	fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) {
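The refactor above replaces the check-then-unwrap pattern (an early-return on None followed by an .unwrap()) with a single if let Some(parent) binding, so the parent header is available without any unwrap. The idiom in miniature (generic sketch, not Parity code):

    use std::collections::HashMap;

    fn lookup(map: &HashMap<u32, String>, key: u32) -> Result<String, ()> {
        // One pattern match both tests for presence and binds the value.
        if let Some(value) = map.get(&key) {
            Ok(value.clone())
        } else {
            Err(())
        }
    }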
@@ -366,23 +355,21 @@ impl Client {
 			for block in blocks {
 				let header = &block.header;

-				if invalid_blocks.contains(header.parent_hash()) {
+				let is_invalid = invalid_blocks.contains(header.parent_hash());
+				if is_invalid {
 					invalid_blocks.insert(header.hash());
 					continue;
 				}

-				let closed_block = self.check_and_close_block(&block);
-				if let Err(_) = closed_block {
-					invalid_blocks.insert(header.hash());
-					continue;
-				}
-
-				let closed_block = closed_block.unwrap();
-				imported_blocks.push(header.hash());
-
-				let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
-				import_results.push(route);
-
-				self.report.write().accrue_block(&block);
+				if let Ok(closed_block) = self.check_and_close_block(&block) {
+					imported_blocks.push(header.hash());
+
+					let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
+					import_results.push(route);
+
+					self.report.write().accrue_block(&block);
+				} else {
+					invalid_blocks.insert(header.hash());
+				}
 			}

 			let imported = imported_blocks.len();
@@ -432,7 +419,7 @@ impl Client {
 		// Are we committing an era?
 		let ancient = if number >= HISTORY {
 			let n = number - HISTORY;
-			Some((n, chain.block_hash(n).unwrap()))
+			Some((n, chain.block_hash(n).expect("only verified blocks can be commited; verified block has hash; qed")))
 		} else {
 			None
 		};
@@ -461,6 +448,8 @@ impl Client {
 			enacted: route.enacted.clone(),
 			retracted: route.retracted.len()
 		});
+		let is_canon = route.enacted.last().map_or(false, |h| h == hash);
+		state.sync_cache(&route.enacted, &route.retracted, is_canon);
 		// Final commit to the DB
 		self.db.read().write_buffered(batch);
 		chain.commit();
@@ -535,9 +524,11 @@ impl Client {
 	/// Get a copy of the best block's state.
 	pub fn state(&self) -> State {
+		let header = self.best_block_header();
+		let header = HeaderView::new(&header);
 		State::from_existing(
-			self.state_db.lock().boxed_clone(),
-			HeaderView::new(&self.best_block_header()).state_root(),
+			self.state_db.lock().boxed_clone_canon(&header.hash()),
+			header.state_root(),
 			self.engine.account_start_nonce(),
 			self.factories.clone())
 		.expect("State root of best block header always valid.")
@@ -899,8 +890,10 @@ impl BlockChainClient for Client {
 			BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index)
 		});

-		match (t, chain.transaction_receipt(&address)) {
-			(Some(tx), Some(receipt)) => {
+		let tx_and_sender = t.and_then(|tx| tx.sender().ok().map(|sender| (tx, sender)));
+
+		match (tx_and_sender, chain.transaction_receipt(&address)) {
+			(Some((tx, sender)), Some(receipt)) => {
 				let block_hash = tx.block_hash.clone();
 				let block_number = tx.block_number.clone();
 				let transaction_hash = tx.hash();
@@ -922,7 +915,7 @@ impl BlockChainClient for Client {
 					gas_used: receipt.gas_used - prior_gas_used,
 					contract_address: match tx.action {
 						Action::Call(_) => None,
-						Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce))
+						Action::Create => Some(contract_address(&sender, &tx.nonce))
 					},
 					logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry {
 						entry: log,
@@ -1023,17 +1016,18 @@ impl BlockChainClient for Client {
 		let start = self.block_number(filter.range.start);
 		let end = self.block_number(filter.range.end);

-		if start.is_some() && end.is_some() {
-			let filter = trace::Filter {
-				range: start.unwrap() as usize..end.unwrap() as usize,
-				from_address: From::from(filter.from_address),
-				to_address: From::from(filter.to_address),
-			};
-
-			let traces = self.tracedb.read().filter(&filter);
-			Some(traces)
-		} else {
-			None
+		match (start, end) {
+			(Some(s), Some(e)) => {
+				let filter = trace::Filter {
+					range: s as usize..e as usize,
+					from_address: From::from(filter.from_address),
+					to_address: From::from(filter.to_address),
+				};
+
+				let traces = self.tracedb.read().filter(&filter);
+				Some(traces)
+			},
+			_ => None,
 		}
 	}
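Matching on the (start, end) tuple replaces is_some() checks followed by unwrap(); the bindings s and e only exist in the arm where both options are Some. The idiom in miniature (generic sketch):

    fn range_len(start: Option<usize>, end: Option<usize>) -> Option<usize> {
        match (start, end) {
            // Both bounds present: bind them with no unwrap() anywhere.
            (Some(s), Some(e)) => Some(e.saturating_sub(s)),
            _ => None,
        }
    }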
@@ -1080,7 +1074,7 @@ impl BlockChainClient for Client {
 	}

 	fn pending_transactions(&self) -> Vec<SignedTransaction> {
-		self.miner.pending_transactions()
+		self.miner.pending_transactions(self.chain.read().best_block_number())
 	}

 	// TODO: Make it an actual queue, return errors.
@@ -1109,7 +1103,7 @@ impl MiningBlockChainClient for Client {
 			engine,
 			self.factories.clone(),
 			false,	// TODO: this will need to be parameterised once we want to do immediate mining insertion.
-			self.state_db.lock().boxed_clone(),
+			self.state_db.lock().boxed_clone_canon(&h),
 			&chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"),
 			self.build_last_hashes(h.clone()),
 			author,
@@ -1120,11 +1114,15 @@ impl MiningBlockChainClient for Client {
 		// Add uncles
 		chain
 			.find_uncle_headers(&h, engine.maximum_uncle_age())
-			.unwrap()
+			.unwrap_or_else(Vec::new)
 			.into_iter()
 			.take(engine.maximum_uncle_count())
 			.foreach(|h| {
-				open_block.push_uncle(h).unwrap();
+				open_block.push_uncle(h).expect("pushing maximum_uncle_count;
+					open_block was just created;
+					push_uncle is not ok only if more than maximum_uncle_count is pushed;
+					so all push_uncle are Ok;
+					qed");
 			});

 		open_block
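The surviving expect calls carry "proof" messages ending in qed, the codebase's convention for documenting at the call site why the Option or Result cannot be the failing variant. The convention in miniature (generic sketch, not Parity code):

    fn main() {
        let xs = vec![1, 2, 3];
        // Invariant: xs was constructed non-empty two lines above; qed
        let last = xs.last().expect("xs constructed with three elements above; qed");
        assert_eq!(*last, 3);
    }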
@@ -1145,6 +1143,7 @@ impl MiningBlockChainClient for Client {
 		let block_data = block.rlp_bytes();
 		let route = self.commit_block(block, &h, &block_data);
 		trace!(target: "client", "Imported sealed block #{} ({})", number, h);
+		self.state_db.lock().sync_cache(&route.enacted, &route.retracted, false);

 		let (enacted, retracted) = self.calculate_enacted_retracted(&[route]);
 		self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);

View File

@@ -30,13 +30,20 @@ pub use self::test_client::{TestBlockChainClient, EachBlockWith};
 pub use types::trace_filter::Filter as TraceFilter;
 pub use executive::{Executed, Executive, TransactOptions};
 pub use env_info::{LastHashes, EnvInfo};
-pub use self::chain_notify::{ChainNotify, ChainNotifyClient};
+pub use self::chain_notify::ChainNotify;

 pub use types::call_analytics::CallAnalytics;
 pub use block_import_error::BlockImportError;
 pub use transaction_import::TransactionImportResult;
 pub use transaction_import::TransactionImportError;
-pub use self::traits::{BlockChainClient, MiningBlockChainClient, RemoteClient};
+pub use self::traits::{BlockChainClient, MiningBlockChainClient};
+
+/// IPC interfaces
+#[cfg(feature="ipc")]
+pub mod remote {
+	pub use super::traits::RemoteClient;
+	pub use super::chain_notify::ChainNotifyClient;
+}

 mod traits {
 	#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
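Downstream crates that need the IPC client types must now both enable the feature and import from the new module. A sketch of the consuming side (the crate path is an assumption based on this file's location):

    // Compiled only when the `ipc` feature is enabled.
    #[cfg(feature = "ipc")]
    use ethcore::client::remote::{RemoteClient, ChainNotifyClient};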

View File

@@ -55,6 +55,8 @@ pub struct TestBlockChainClient {
 	pub genesis_hash: H256,
 	/// Last block hash.
 	pub last_hash: RwLock<H256>,
+	/// Extra data do set for each block
+	pub extra_data: Bytes,
 	/// Difficulty.
 	pub difficulty: RwLock<U256>,
 	/// Balances.
@@ -105,11 +107,17 @@ impl Default for TestBlockChainClient {
 impl TestBlockChainClient {
 	/// Creates new test client.
 	pub fn new() -> Self {
+		Self::new_with_extra_data(Bytes::new())
+	}
+
+	/// Creates new test client with specified extra data for each block
+	pub fn new_with_extra_data(extra_data: Bytes) -> Self {
 		let spec = Spec::new_test();
 		let mut client = TestBlockChainClient {
 			blocks: RwLock::new(HashMap::new()),
 			numbers: RwLock::new(HashMap::new()),
 			genesis_hash: H256::new(),
+			extra_data: extra_data,
 			last_hash: RwLock::new(H256::new()),
 			difficulty: RwLock::new(From::from(0)),
 			balances: RwLock::new(HashMap::new()),
@@ -184,6 +192,7 @@ impl TestBlockChainClient {
 		header.set_parent_hash(self.last_hash.read().clone());
 		header.set_number(n as BlockNumber);
 		header.set_gas_limit(U256::from(1_000_000));
+		header.set_extra_data(self.extra_data.clone());
 		let uncles = match with {
 			EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
 				let mut uncles = RlpStream::new_list(1);
@@ -606,6 +615,6 @@ impl BlockChainClient for TestBlockChainClient {
 	}

 	fn pending_transactions(&self) -> Vec<SignedTransaction> {
-		self.miner.pending_transactions()
+		self.miner.pending_transactions(self.chain_info().best_block_number)
 	}
 }
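A usage sketch for the new constructor (add_blocks and EachBlockWith::Nothing are assumed from the existing test-client API; not part of this commit):

    // Every generated block carries the same caller-chosen extra data.
    let client = TestBlockChainClient::new_with_extra_data(b"node-id".to_vec());
    client.add_blocks(3, EachBlockWith::Nothing);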

View File

@@ -20,6 +20,7 @@ mod message;
 mod timeout;
 mod params;
 mod vote;
+mod vote_collector;

 use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
 use common::*;
@@ -246,9 +247,11 @@ impl Engine for Tendermint {
 		}
 	}

-	/// Set author to proposer.
+	/// Set author to proposer and set the correct round in the seal.
 	/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
-	fn on_close_block(&self, _block: &mut ExecutedBlock) {}
+	fn on_close_block(&self, _block: &mut ExecutedBlock) {
+	}

 	/// Attempt to seal the block internally using all available signatures.
 	///
@@ -278,11 +281,14 @@ impl Engine for Tendermint {
 	fn handle_message(&self, sender: Address, signature: H520, message: UntrustedRlp) -> Result<Bytes, Error> {
 		let message: ConsensusMessage = try!(message.as_val());
-		try!(Err(EngineError::UnknownStep))
+		if self.is_authority(&sender) {
 			//match message {
 			//	ConsensusMessage::Prevote
 			//}
+		}
+		try!(Err(EngineError::UnknownStep))

 		// Check if correct round.
 		//if self.r.load(AtomicOrdering::Relaxed) != try!(message.val_at(0)) {

View File

@@ -21,27 +21,35 @@ use util::Hashable;
 use account_provider::AccountProvider;
 use rlp::{View, DecoderError, Decodable, Decoder, Encodable, RlpStream, Stream};
 use basic_types::Seal;
+use super::BlockHash;

-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq, Hash)]
 pub struct Vote {
+	block_hash: BlockHash,
 	signature: H520
 }

-fn message(header: &Header) -> H256 {
+fn block_hash(header: &Header) -> H256 {
 	header.rlp(Seal::WithSome(1)).sha3()
 }

 impl Vote {
-	fn new(signature: H520) -> Vote { Vote { signature: signature }}
+	fn new(block_hash: BlockHash, signature: H520) -> Vote {
+		Vote { block_hash: block_hash, signature: signature }
+	}

 	/// Try to use the author address to create a vote.
 	pub fn propose(header: &Header, accounts: &AccountProvider) -> Option<Vote> {
-		accounts.sign(*header.author(), message(&header)).ok().map(Into::into).map(Self::new)
+		Self::validate(header, accounts, *header.author())
 	}

 	/// Use any unlocked validator account to create a vote.
 	pub fn validate(header: &Header, accounts: &AccountProvider, validator: Address) -> Option<Vote> {
-		accounts.sign(validator, message(&header)).ok().map(Into::into).map(Self::new)
+		let message = block_hash(&header);
+		accounts.sign(validator, message)
+			.ok()
+			.map(Into::into)
+			.map(|sig| Self::new(message, sig))
 	}
 }

@@ -51,13 +59,14 @@ impl Decodable for Vote {
 		if decoder.as_raw().len() != try!(rlp.payload_info()).total() {
 			return Err(DecoderError::RlpIsTooBig);
 		}
-		rlp.as_val().map(Self::new)
+		Ok(Self::new(try!(rlp.val_at(0)), try!(rlp.val_at(1))))
 	}
}

 impl Encodable for Vote {
 	fn rlp_append(&self, s: &mut RlpStream) {
-		let Vote { ref signature } = *self;
+		let Vote { ref block_hash, ref signature } = *self;
+		s.append(block_hash);
 		s.append(signature);
 	}
 }
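With block_hash added to the struct, the wire format becomes the two-item list [block_hash, signature], which is why decoding now reads val_at(0) and val_at(1) and encoding appends both fields. A round-trip sketch (the rlp::encode/decode free functions are assumed from the crate's API, and Vote::new is private to this module, so this is illustrative only):

    let vote = Vote::new(block_hash, signature);
    let encoded = ::rlp::encode(&vote);
    let decoded: Vote = ::rlp::decode(&encoded);
    assert_eq!(vote, decoded); // enabled by the newly derived PartialEq/Eq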

View File

@@ -116,11 +116,11 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 			let instruction = code[reader.position];
 			reader.position += 1;

-			let info = infos[instruction as usize];
-			try!(self.verify_instruction(ext, instruction, &info, &stack));
+			let info = &infos[instruction as usize];
+			try!(self.verify_instruction(ext, instruction, info, &stack));

 			// Calculate gas cost
-			let (gas_cost, mem_gas, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, &info, &stack, self.mem.size()));
+			let (gas_cost, mem_gas, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, info, &stack, self.mem.size()));

 			// TODO: make compile-time removable if too much of a performance hit.
 			let trace_executed = ext.trace_prepare_execute(reader.position - 1, instruction, &gas_cost.as_u256());
@@ -129,7 +129,7 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 			gasometer.current_mem_gas = mem_gas;
 			gasometer.current_gas = gasometer.current_gas - gas_cost;

-			evm_debug!({ informant.before_instruction(reader.position, instruction, &info, &gasometer.current_gas, &stack) });
+			evm_debug!({ informant.before_instruction(reader.position, instruction, info, &gasometer.current_gas, &stack) });

 			let (mem_written, store_written) = match trace_executed {
 				true => (Self::mem_written(instruction, &stack), Self::store_written(instruction, &stack)),
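The one-character change (&infos[...] instead of infos[...]) matters on this hot path: the table elements are Copy, so indexing by value duplicates the whole struct on every executed opcode, while borrowing does not. In miniature (generic sketch):

    #[derive(Clone, Copy)]
    struct InstructionInfo { gas_cost: u64 }

    fn main() {
        let infos = [InstructionInfo { gas_cost: 3 }; 256];
        let instruction = 0x01u8;
        let info = &infos[instruction as usize]; // borrow: no per-opcode copy
        println!("{}", info.gas_cost);
    }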

View File

@@ -21,7 +21,7 @@ use util::sha3::*;
 use bit_set::BitSet;
 use super::super::instructions;

-const CACHE_CODE_ITEMS: usize = 4096;
+const CACHE_CODE_ITEMS: usize = 65536;

 /// GLobal cache for EVM interpreter
 pub struct SharedCache {
View File

@@ -25,10 +25,10 @@ use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, E
 use crossbeam;
 pub use types::executed::{Executed, ExecutionResult};

-/// Max depth to avoid stack overflow (when it's reached we start a new thread with VM)
+/// Roughly estimate what stack size each level of evm depth will use
 /// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132)
 /// Maybe something like here: `https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp`
-const MAX_VM_DEPTH_FOR_THREAD: usize = 64;
+const STACK_SIZE_PER_DEPTH: usize = 24*1024;

 /// Returns new address created from address and given nonce.
 pub fn contract_address(address: &Address, nonce: &U256) -> Address {
@@ -149,12 +149,13 @@ impl<'a> Executive<'a> {
 		// TODO: we might need bigints here, or at least check overflows.
 		let balance = self.state.balance(&sender);
-		let gas_cost = U512::from(t.gas) * U512::from(t.gas_price);
+		let gas_cost = t.gas.full_mul(t.gas_price);
 		let total_cost = U512::from(t.value) + gas_cost;

 		// avoid unaffordable transactions
-		if U512::from(balance) < total_cost {
-			return Err(From::from(ExecutionError::NotEnoughCash { required: total_cost, got: U512::from(balance) }));
+		let balance512 = U512::from(balance);
+		if balance512 < total_cost {
+			return Err(From::from(ExecutionError::NotEnoughCash { required: total_cost, got: balance512 }));
 		}

 		// NOTE: there can be no invalid transactions from this point.
@@ -212,8 +213,11 @@ impl<'a> Executive<'a> {
 		tracer: &mut T,
 		vm_tracer: &mut V
 	) -> evm::Result<U256> where T: Tracer, V: VMTracer {
+		let depth_threshold = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get() / STACK_SIZE_PER_DEPTH);
+
 		// Ordinary execution - keep VM in same thread
-		if (self.depth + 1) % MAX_VM_DEPTH_FOR_THREAD != 0 {
+		if (self.depth + 1) % depth_threshold != 0 {
 			let vm_factory = self.vm_factory;
 			let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer);
 			trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call);
@@ -265,7 +269,7 @@ impl<'a> Executive<'a> {
 			let cost = self.engine.cost_of_builtin(&params.code_address, data);
 			if cost <= params.gas {
 				self.engine.execute_builtin(&params.code_address, data, &mut output);
-				self.state.clear_snapshot();
+				self.state.discard_snapshot();

 				// trace only top level calls to builtins to avoid DDoS attacks
 				if self.depth == 0 {
@@ -285,7 +289,7 @@ impl<'a> Executive<'a> {
 				Ok(params.gas - cost)
 			} else {
 				// just drain the whole gas
-				self.state.revert_snapshot();
+				self.state.revert_to_snapshot();

 				tracer.trace_failed_call(trace_info, vec![], evm::Error::OutOfGas.into());
@@ -331,7 +335,7 @@ impl<'a> Executive<'a> {
 			res
 		} else {
 			// otherwise it's just a basic transaction, only do tracing, if necessary.
-			self.state.clear_snapshot();
+			self.state.discard_snapshot();

 			tracer.trace_call(trace_info, U256::zero(), trace_output, vec![]);
 			Ok(params.gas)
@@ -413,7 +417,7 @@ impl<'a> Executive<'a> {
 		// real ammount to refund
 		let gas_left_prerefund = match result { Ok(x) => x, _ => 0.into() };
-		let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) / U256::from(2));
+		let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) >> 1);
 		let gas_left = gas_left_prerefund + refunded;
 		let gas_used = t.gas - gas_left;
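The refund halving swaps a full 256-bit division for a right shift; for unsigned values the two are equivalent, and the shift is cheaper. A trivial check with primitive types (illustrative):

    fn main() {
        let gas_spent = 21_000u64;
        // x / 2 == x >> 1 for any unsigned integer, U256 included.
        assert_eq!(gas_spent / 2, gas_spent >> 1);
    }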
@@ -473,10 +477,10 @@ impl<'a> Executive<'a> {
 			| Err(evm::Error::BadInstruction {.. })
 			| Err(evm::Error::StackUnderflow {..})
 			| Err(evm::Error::OutOfStack {..}) => {
-				self.state.revert_snapshot();
+				self.state.revert_to_snapshot();
 			},
 			Ok(_) | Err(evm::Error::Internal) => {
-				self.state.clear_snapshot();
+				self.state.discard_snapshot();
 				substate.accrue(un_substate);
 			}
 		}

View File

@@ -48,6 +48,17 @@ pub enum PendingSet {
 	SealingOrElseQueue,
 }

+/// Type of the gas limit to apply to the transaction queue.
+#[derive(Debug, PartialEq)]
+pub enum GasLimit {
+	/// Depends on the block gas limit and is updated with every block.
+	Auto,
+	/// No limit.
+	None,
+	/// Set to a fixed gas value.
+	Fixed(U256),
+}
+
 /// Configures the behaviour of the miner.
 #[derive(Debug, PartialEq)]
 pub struct MinerOptions {
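A configuration sketch for the new option (field value is an example only; MinerOptions implements Default, as shown further down):

    // Cap the whole queue at a fixed gas budget instead of tracking
    // the block gas limit automatically.
    let options = MinerOptions {
        tx_queue_gas_limit: GasLimit::Fixed(U256::from(8_000_000)),
        ..MinerOptions::default()
    };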
@@ -71,6 +82,8 @@ pub struct MinerOptions {
 	pub work_queue_size: usize,
 	/// Can we submit two different solutions for the same block and expect both to result in an import?
 	pub enable_resubmission: bool,
+	/// Global gas limit for all transaction in the queue except for local and retracted.
+	pub tx_queue_gas_limit: GasLimit,
 }

 impl Default for MinerOptions {
@@ -81,11 +94,12 @@ impl Default for MinerOptions {
 			reseal_on_external_tx: false,
 			reseal_on_own_tx: true,
 			tx_gas_limit: !U256::zero(),
-			tx_queue_size: 1024,
+			tx_queue_size: 2048,
 			pending_set: PendingSet::AlwaysQueue,
 			reseal_min_period: Duration::from_secs(2),
 			work_queue_size: 20,
 			enable_resubmission: true,
+			tx_queue_gas_limit: GasLimit::Auto,
 		}
 	}
 }
@@ -194,7 +208,11 @@ impl Miner {
 			true => None,
 			false => Some(WorkPoster::new(&options.new_work_notify))
 		};
-		let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)));
+		let gas_limit = match options.tx_queue_gas_limit {
+			GasLimit::Fixed(ref limit) => *limit,
+			_ => !U256::zero(),
+		};
+		let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, gas_limit, options.tx_gas_limit)));
 		Miner {
 			transaction_queue: txq,
 			next_allowed_reseal: Mutex::new(Instant::now()),
@@ -443,6 +461,10 @@ impl Miner {
 		let gas_limit = HeaderView::new(&chain.best_block_header()).gas_limit();
 		let mut queue = self.transaction_queue.lock();
 		queue.set_gas_limit(gas_limit);
+		if let GasLimit::Auto = self.options.tx_queue_gas_limit {
+			// Set total tx queue gas limit to be 2x the block gas limit.
+			queue.set_total_gas_limit(gas_limit << 1);
+		}
 	}

 	/// Returns true if we had to prepare new pending block.
@@ -493,6 +515,21 @@ impl Miner {
 	/// Are we allowed to do a non-mandatory reseal?
 	fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock() }

+	fn from_pending_block<H, F, G>(&self, latest_block_number: BlockNumber, from_chain: F, map_block: G) -> H
+		where F: Fn() -> H, G: Fn(&ClosedBlock) -> H {
+		let sealing_work = self.sealing_work.lock();
+		sealing_work.queue.peek_last_ref().map_or_else(
+			|| from_chain(),
+			|b| {
+				if b.block().header().number() > latest_block_number {
+					map_block(b)
+				} else {
+					from_chain()
+				}
+			}
+		)
+	}
 }

 const SEALING_TIMEOUT_IN_BLOCKS : u64 = 5;
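The new from_pending_block helper centralises the staleness guard that the old code only had as a TODO: the locally-prepared pending block is trusted only when it builds on top of the chain's current best block; otherwise every query falls back to chain data. The guard in miniature (generic sketch, not Parity code):

    fn pick<T>(pending: Option<(u64, T)>, best_block: u64, fallback: T) -> T {
        match pending {
            // Pending block is strictly newer than the best block: use it.
            Some((number, value)) if number > best_block => value,
            // No pending block, or a stale one: fall back to the chain.
            _ => fallback,
        }
    }

    fn main() {
        assert_eq!(pick(Some((11, "pending")), 10, "chain"), "pending");
        assert_eq!(pick(Some((8, "pending")), 10, "chain"), "chain");
    }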
@ -565,29 +602,35 @@ impl MinerService for Miner {
} }
fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 {
let sealing_work = self.sealing_work.lock(); self.from_pending_block(
sealing_work.queue.peek_last_ref().map_or_else( chain.chain_info().best_block_number,
|| chain.latest_balance(address), || chain.latest_balance(address),
|b| b.block().fields().state.balance(address) |b| b.block().fields().state.balance(address)
) )
} }
fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 {
let sealing_work = self.sealing_work.lock(); self.from_pending_block(
sealing_work.queue.peek_last_ref().map_or_else( chain.chain_info().best_block_number,
|| chain.latest_storage_at(address, position), || chain.latest_storage_at(address, position),
|b| b.block().fields().state.storage_at(address, position) |b| b.block().fields().state.storage_at(address, position)
) )
} }
fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 {
let sealing_work = self.sealing_work.lock(); self.from_pending_block(
sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address)) chain.chain_info().best_block_number,
|| chain.latest_nonce(address),
|b| b.block().fields().state.nonce(address)
)
} }
fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Bytes> { fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Bytes> {
let sealing_work = self.sealing_work.lock(); self.from_pending_block(
sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_code(address), |b| b.block().fields().state.code(address).map(|c| (*c).clone())) chain.chain_info().best_block_number,
|| chain.latest_code(address),
|b| b.block().fields().state.code(address).map(|c| (*c).clone())
)
} }
fn set_author(&self, author: Address) { fn set_author(&self, author: Address) {
@ -737,50 +780,74 @@ impl MinerService for Miner {
queue.top_transactions() queue.top_transactions()
} }
fn pending_transactions(&self) -> Vec<SignedTransaction> { fn pending_transactions(&self, best_block: BlockNumber) -> Vec<SignedTransaction> {
let queue = self.transaction_queue.lock(); let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock(); match self.options.pending_set {
// TODO: should only use the sealing_work when it's current (it could be an old block) PendingSet::AlwaysQueue => queue.top_transactions(),
let sealing_set = match sw.enabled { PendingSet::SealingOrElseQueue => {
true => sw.queue.peek_last_ref(), self.from_pending_block(
false => None, best_block,
}; || queue.top_transactions(),
match (&self.options.pending_set, sealing_set) { |sealing| sealing.transactions().to_owned()
(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.top_transactions(), )
(_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().to_owned()), },
PendingSet::AlwaysSealing => {
self.from_pending_block(
best_block,
|| vec![],
|sealing| sealing.transactions().to_owned()
)
},
} }
} }
fn pending_transactions_hashes(&self) -> Vec<H256> { fn pending_transactions_hashes(&self, best_block: BlockNumber) -> Vec<H256> {
let queue = self.transaction_queue.lock(); let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock(); match self.options.pending_set {
let sealing_set = match sw.enabled { PendingSet::AlwaysQueue => queue.pending_hashes(),
true => sw.queue.peek_last_ref(), PendingSet::SealingOrElseQueue => {
false => None, self.from_pending_block(
}; best_block,
match (&self.options.pending_set, sealing_set) { || queue.pending_hashes(),
(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.pending_hashes(), |sealing| sealing.transactions().iter().map(|t| t.hash()).collect()
(_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().iter().map(|t| t.hash()).collect()), )
},
PendingSet::AlwaysSealing => {
self.from_pending_block(
best_block,
|| vec![],
|sealing| sealing.transactions().iter().map(|t| t.hash()).collect()
)
},
} }
} }
fn transaction(&self, hash: &H256) -> Option<SignedTransaction> { fn transaction(&self, best_block: BlockNumber, hash: &H256) -> Option<SignedTransaction> {
let queue = self.transaction_queue.lock(); let queue = self.transaction_queue.lock();
let sw = self.sealing_work.lock(); match self.options.pending_set {
let sealing_set = match sw.enabled { PendingSet::AlwaysQueue => queue.find(hash),
true => sw.queue.peek_last_ref(), PendingSet::SealingOrElseQueue => {
false => None, self.from_pending_block(
}; best_block,
match (&self.options.pending_set, sealing_set) { || queue.find(hash),
(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.find(hash), |sealing| sealing.transactions().iter().find(|t| &t.hash() == hash).cloned()
(_, sealing) => sealing.and_then(|s| s.transactions().iter().find(|t| &t.hash() == hash).cloned()), )
},
PendingSet::AlwaysSealing => {
self.from_pending_block(
best_block,
|| None,
|sealing| sealing.transactions().iter().find(|t| &t.hash() == hash).cloned()
)
},
} }
} }
fn pending_receipt(&self, hash: &H256) -> Option<RichReceipt> { fn pending_receipt(&self, best_block: BlockNumber, hash: &H256) -> Option<RichReceipt> {
let sealing_work = self.sealing_work.lock(); self.from_pending_block(
match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) { best_block,
(true, Some(pending)) => { || None,
|pending| {
let txs = pending.transactions(); let txs = pending.transactions();
txs.iter() txs.iter()
.map(|t| t.hash()) .map(|t| t.hash())
@ -801,15 +868,15 @@ impl MinerService for Miner {
logs: receipt.logs.clone(), logs: receipt.logs.clone(),
} }
}) })
},
_ => None
} }
)
} }
fn pending_receipts(&self) -> BTreeMap<H256, Receipt> { fn pending_receipts(&self, best_block: BlockNumber) -> BTreeMap<H256, Receipt> {
let sealing_work = self.sealing_work.lock(); self.from_pending_block(
match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) { best_block,
(true, Some(pending)) => { || BTreeMap::new(),
|pending| {
let hashes = pending.transactions() let hashes = pending.transactions()
.iter() .iter()
.map(|t| t.hash()); .map(|t| t.hash());
@ -817,9 +884,8 @@ impl MinerService for Miner {
let receipts = pending.receipts().iter().cloned(); let receipts = pending.receipts().iter().cloned();
hashes.zip(receipts).collect() hashes.zip(receipts).collect()
},
_ => BTreeMap::new()
} }
)
} }
fn last_nonce(&self, address: &Address) -> Option<U256> { fn last_nonce(&self, address: &Address) -> Option<U256> {
@ -1016,6 +1082,7 @@ mod tests {
reseal_min_period: Duration::from_secs(5), reseal_min_period: Duration::from_secs(5),
tx_gas_limit: !U256::zero(), tx_gas_limit: !U256::zero(),
tx_queue_size: 1024, tx_queue_size: 1024,
tx_queue_gas_limit: GasLimit::None,
pending_set: PendingSet::AlwaysSealing, pending_set: PendingSet::AlwaysSealing,
work_queue_size: 5, work_queue_size: 5,
enable_resubmission: true, enable_resubmission: true,
@ -1044,34 +1111,54 @@ mod tests {
let client = TestBlockChainClient::default(); let client = TestBlockChainClient::default();
let miner = miner(); let miner = miner();
let transaction = transaction(); let transaction = transaction();
let best_block = 0;
// when // when
let res = miner.import_own_transaction(&client, transaction); let res = miner.import_own_transaction(&client, transaction);
// then // then
assert_eq!(res.unwrap(), TransactionImportResult::Current); assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(miner.all_transactions().len(), 1); assert_eq!(miner.all_transactions().len(), 1);
assert_eq!(miner.pending_transactions().len(), 1); assert_eq!(miner.pending_transactions(best_block).len(), 1);
assert_eq!(miner.pending_transactions_hashes().len(), 1); assert_eq!(miner.pending_transactions_hashes(best_block).len(), 1);
assert_eq!(miner.pending_receipts().len(), 1); assert_eq!(miner.pending_receipts(best_block).len(), 1);
// This method will let us know if pending block was created (before calling that method) // This method will let us know if pending block was created (before calling that method)
assert!(!miner.prepare_work_sealing(&client)); assert!(!miner.prepare_work_sealing(&client));
} }
#[test]
fn should_not_use_pending_block_if_best_block_is_higher() {
// given
let client = TestBlockChainClient::default();
let miner = miner();
let transaction = transaction();
let best_block = 10;
// when
let res = miner.import_own_transaction(&client, transaction);
// then
assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(miner.all_transactions().len(), 1);
assert_eq!(miner.pending_transactions(best_block).len(), 0);
assert_eq!(miner.pending_transactions_hashes(best_block).len(), 0);
assert_eq!(miner.pending_receipts(best_block).len(), 0);
}
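With the test client still at block 0, the miner's pending block can only sit at height 1, so a caller reporting `best_block = 10` is already past it and every query above falls back to the empty chain view instead of the stale pending block.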
#[test] #[test]
fn should_import_external_transaction() { fn should_import_external_transaction() {
// given // given
let client = TestBlockChainClient::default(); let client = TestBlockChainClient::default();
let miner = miner(); let miner = miner();
let transaction = transaction(); let transaction = transaction();
let best_block = 0;
// when // when
let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap(); let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap();
// then // then
assert_eq!(res.unwrap(), TransactionImportResult::Current); assert_eq!(res.unwrap(), TransactionImportResult::Current);
assert_eq!(miner.all_transactions().len(), 1); assert_eq!(miner.all_transactions().len(), 1);
assert_eq!(miner.pending_transactions_hashes().len(), 0); assert_eq!(miner.pending_transactions_hashes(best_block).len(), 0);
assert_eq!(miner.pending_transactions().len(), 0); assert_eq!(miner.pending_transactions(best_block).len(), 0);
assert_eq!(miner.pending_receipts().len(), 0); assert_eq!(miner.pending_receipts(best_block).len(), 0);
// This method will let us know if pending block was created (before calling that method) // This method will let us know if pending block was created (before calling that method)
assert!(miner.prepare_work_sealing(&client)); assert!(miner.prepare_work_sealing(&client));
} }
View File
@ -48,7 +48,7 @@ mod work_notify;
mod price_info; mod price_info;
pub use self::transaction_queue::{TransactionQueue, AccountDetails, TransactionOrigin}; pub use self::transaction_queue::{TransactionQueue, AccountDetails, TransactionOrigin};
pub use self::miner::{Miner, MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions}; pub use self::miner::{Miner, MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions, GasLimit};
pub use self::external::{ExternalMiner, ExternalMinerService}; pub use self::external::{ExternalMiner, ExternalMinerService};
pub use client::TransactionImportResult; pub use client::TransactionImportResult;
@ -56,6 +56,7 @@ use std::collections::BTreeMap;
use util::{H256, U256, Address, Bytes}; use util::{H256, U256, Address, Bytes};
use client::{MiningBlockChainClient, Executed, CallAnalytics}; use client::{MiningBlockChainClient, Executed, CallAnalytics};
use block::ClosedBlock; use block::ClosedBlock;
use header::BlockNumber;
use receipt::{RichReceipt, Receipt}; use receipt::{RichReceipt, Receipt};
use error::{Error, CallError}; use error::{Error, CallError};
use transaction::SignedTransaction; use transaction::SignedTransaction;
@ -115,7 +116,7 @@ pub trait MinerService : Send + Sync {
Result<TransactionImportResult, Error>; Result<TransactionImportResult, Error>;
/// Returns hashes of transactions currently in pending /// Returns hashes of transactions currently in pending
fn pending_transactions_hashes(&self) -> Vec<H256>; fn pending_transactions_hashes(&self, best_block: BlockNumber) -> Vec<H256>;
/// Removes all transactions from the queue and restart mining operation. /// Removes all transactions from the queue and restart mining operation.
fn clear_and_reset(&self, chain: &MiningBlockChainClient); fn clear_and_reset(&self, chain: &MiningBlockChainClient);
@ -135,19 +136,19 @@ pub trait MinerService : Send + Sync {
where F: FnOnce(&ClosedBlock) -> T, Self: Sized; where F: FnOnce(&ClosedBlock) -> T, Self: Sized;
/// Query pending transactions for hash. /// Query pending transactions for hash.
fn transaction(&self, hash: &H256) -> Option<SignedTransaction>; fn transaction(&self, best_block: BlockNumber, hash: &H256) -> Option<SignedTransaction>;
/// Get a list of all transactions. /// Get a list of all transactions.
fn all_transactions(&self) -> Vec<SignedTransaction>; fn all_transactions(&self) -> Vec<SignedTransaction>;
/// Get a list of all pending transactions. /// Get a list of all pending transactions.
fn pending_transactions(&self) -> Vec<SignedTransaction>; fn pending_transactions(&self, best_block: BlockNumber) -> Vec<SignedTransaction>;
/// Get a list of all pending receipts. /// Get a list of all pending receipts.
fn pending_receipts(&self) -> BTreeMap<H256, Receipt>; fn pending_receipts(&self, best_block: BlockNumber) -> BTreeMap<H256, Receipt>;
/// Get a particular receipt. /// Get a particular receipt.
fn pending_receipt(&self, hash: &H256) -> Option<RichReceipt>; fn pending_receipt(&self, best_block: BlockNumber, hash: &H256) -> Option<RichReceipt>;
/// Returns highest transaction nonce for given address. /// Returns highest transaction nonce for given address.
fn last_nonce(&self, address: &Address) -> Option<U256>; fn last_nonce(&self, address: &Address) -> Option<U256>;
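Callers now have to thread the canonical head into every pending-state query. A hypothetical call site (assuming a `client` exposing the existing `chain_info()` accessor; this snippet is not part of the diff):

    // Hypothetical call site; passing the head lets the miner detect
    // and ignore a stale pending block.
    fn pending_view(client: &MiningBlockChainClient, miner: &Miner) {
        let best_block = client.chain_info().best_block_number;
        let _txs = miner.pending_transactions(best_block);
        let _receipts = miner.pending_receipts(best_block);
    }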
View File
@ -130,6 +130,8 @@ struct TransactionOrder {
/// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5) /// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5)
/// High nonce_height = Low priority (processed later) /// High nonce_height = Low priority (processed later)
nonce_height: U256, nonce_height: U256,
/// Gas specified in the transaction.
gas: U256,
/// Gas Price of the transaction. /// Gas Price of the transaction.
/// Low gas price = Low priority (processed later) /// Low gas price = Low priority (processed later)
gas_price: U256, gas_price: U256,
@ -146,6 +148,7 @@ impl TransactionOrder {
fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self {
TransactionOrder { TransactionOrder {
nonce_height: tx.nonce() - base_nonce, nonce_height: tx.nonce() - base_nonce,
gas: tx.transaction.gas.clone(),
gas_price: tx.transaction.gas_price, gas_price: tx.transaction.gas_price,
hash: tx.hash(), hash: tx.hash(),
origin: tx.origin, origin: tx.origin,
@ -287,6 +290,7 @@ struct TransactionSet {
by_address: Table<Address, U256, TransactionOrder>, by_address: Table<Address, U256, TransactionOrder>,
by_gas_price: GasPriceQueue, by_gas_price: GasPriceQueue,
limit: usize, limit: usize,
gas_limit: U256,
} }
impl TransactionSet { impl TransactionSet {
@ -317,15 +321,20 @@ impl TransactionSet {
/// It drops transactions from this set but also removes associated `VerifiedTransaction`. /// It drops transactions from this set but also removes associated `VerifiedTransaction`.
/// Returns addresses and lowest nonces of transactions removed because of limit. /// Returns addresses and lowest nonces of transactions removed because of limit.
fn enforce_limit(&mut self, by_hash: &mut HashMap<H256, VerifiedTransaction>) -> Option<HashMap<Address, U256>> { fn enforce_limit(&mut self, by_hash: &mut HashMap<H256, VerifiedTransaction>) -> Option<HashMap<Address, U256>> {
let len = self.by_priority.len(); let mut count = 0;
if len <= self.limit { let mut gas: U256 = 0.into();
return None;
}
let to_drop : Vec<(Address, U256)> = { let to_drop : Vec<(Address, U256)> = {
self.by_priority self.by_priority
.iter() .iter()
.skip(self.limit) .skip_while(|order| {
count = count + 1;
let r = gas.overflowing_add(order.gas);
if r.1 { return false }
gas = r.0;
// Own and retracted transactions are allowed to go above the gas limit, but not above the count limit.
(gas <= self.gas_limit || order.origin == TransactionOrigin::Local || order.origin == TransactionOrigin::RetractedBlock) &&
count <= self.limit
})
.map(|order| by_hash.get(&order.hash) .map(|order| by_hash.get(&order.hash)
.expect("All transactions in `self.by_priority` and `self.by_address` are kept in sync with `by_hash`.")) .expect("All transactions in `self.by_priority` and `self.by_address` are kept in sync with `by_hash`."))
.map(|tx| (tx.sender(), tx.nonce())) .map(|tx| (tx.sender(), tx.nonce()))
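The predicate above enforces two caps at once: a running gas total (accumulated with `overflowing_add`) and an entry count, with locally submitted and retracted transactions exempt from the gas cap but never from the count cap. A std-only model of the same keep/drop rule:

    // `orders` is sorted by priority; each entry is (gas, exempt_from_gas_cap).
    // Returns how many leading entries survive; the rest would be dropped.
    fn kept_entries(orders: &[(u64, bool)], gas_limit: u64, limit: usize) -> usize {
        let mut count = 0usize;
        let mut gas = 0u64;
        orders.iter()
            .take_while(|&&(g, exempt)| {
                count += 1;
                match gas.checked_add(g) {
                    // Overflow mirrors the `overflowing_add` bail-out above.
                    None => false,
                    Some(total) => {
                        gas = total;
                        (gas <= gas_limit || exempt) && count <= limit
                    }
                }
            })
            .count()
    }

    fn demo() {
        // Two 100k-gas externals fit a 200k cap; the third is dropped.
        assert_eq!(kept_entries(&[(100_000, false); 3], 200_000, 100), 2);
        // Exempt (local) entries bypass the gas cap but not the count cap.
        assert_eq!(kept_entries(&[(100_000, true); 3], 200_000, 2), 2);
    }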
@ -432,16 +441,17 @@ impl Default for TransactionQueue {
impl TransactionQueue { impl TransactionQueue {
/// Creates new instance of this Queue /// Creates new instance of this Queue
pub fn new() -> Self { pub fn new() -> Self {
Self::with_limits(1024, !U256::zero()) Self::with_limits(1024, !U256::zero(), !U256::zero())
} }
/// Create new instance of this Queue with specified limits /// Create new instance of this Queue with specified limits
pub fn with_limits(limit: usize, tx_gas_limit: U256) -> Self { pub fn with_limits(limit: usize, gas_limit: U256, tx_gas_limit: U256) -> Self {
let current = TransactionSet { let current = TransactionSet {
by_priority: BTreeSet::new(), by_priority: BTreeSet::new(),
by_address: Table::new(), by_address: Table::new(),
by_gas_price: Default::default(), by_gas_price: Default::default(),
limit: limit, limit: limit,
gas_limit: gas_limit,
}; };
let future = TransactionSet { let future = TransactionSet {
@ -449,6 +459,7 @@ impl TransactionQueue {
by_address: Table::new(), by_address: Table::new(),
by_gas_price: Default::default(), by_gas_price: Default::default(),
limit: limit, limit: limit,
gas_limit: gas_limit,
}; };
TransactionQueue { TransactionQueue {
@ -504,6 +515,13 @@ impl TransactionQueue {
}; };
} }
/// Sets new total gas limit.
pub fn set_total_gas_limit(&mut self, gas_limit: U256) {
self.future.gas_limit = gas_limit;
self.current.gas_limit = gas_limit;
self.future.enforce_limit(&mut self.by_hash);
}
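Note that only the `future` set is re-shrunk immediately here; `current` just records the new cap for subsequent queue operations. A hypothetical call site (the multiplier is illustrative, not from this diff):

    // Resize the queue's total gas cap from the latest block gas limit.
    fn resize_queue(txq: &mut TransactionQueue, block_gas_limit: U256) {
        txq.set_total_gas_limit(block_gas_limit * U256::from(20));
    }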
/// Set the new limit for the amount of gas any individual transaction may have. /// Set the new limit for the amount of gas any individual transaction may have.
/// Any transaction already imported to the queue is not affected. /// Any transaction already imported to the queue is not affected.
pub fn set_tx_gas_limit(&mut self, limit: U256) { pub fn set_tx_gas_limit(&mut self, limit: U256) {
@ -636,7 +654,7 @@ impl TransactionQueue {
}; };
for k in nonces_from_sender { for k in nonces_from_sender {
let order = self.future.drop(&sender, &k).unwrap(); let order = self.future.drop(&sender, &k).unwrap();
self.current.insert(sender, k, order.penalize()); self.future.insert(sender, k, order.penalize());
} }
} }
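The one-word fix above is the point of this hunk: orders pulled out of `future` during `penalize` are now reinserted into `future` with lowered priority, instead of leaking into `current`; the new `should_penalize_transactions_from_sender_in_future` test below pins this down.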
@ -735,6 +753,15 @@ impl TransactionQueue {
.collect() .collect()
} }
#[cfg(test)]
fn future_transactions(&self) -> Vec<SignedTransaction> {
self.future.by_priority
.iter()
.map(|t| self.by_hash.get(&t.hash).expect("All transactions in `current` and `future` are always included in `by_hash`"))
.map(|t| t.transaction.clone())
.collect()
}
/// Returns hashes of all transactions from current, ordered by priority. /// Returns hashes of all transactions from current, ordered by priority.
pub fn pending_hashes(&self) -> Vec<H256> { pub fn pending_hashes(&self) -> Vec<H256> {
self.current.by_priority self.current.by_priority
@ -818,6 +845,16 @@ impl TransactionQueue {
let nonce = tx.nonce(); let nonce = tx.nonce();
let hash = tx.hash(); let hash = tx.hash();
{
// Rough size sanity check
let gas = &tx.transaction.gas;
if U256::from(tx.transaction.data.len()) > *gas {
// Dropping transaction
trace!(target: "txqueue", "Dropping oversized transaction: {:?} (gas: {} < size {})", hash, gas, tx.transaction.data.len());
return Err(TransactionError::LimitReached);
}
}
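As the comment says, this is only a rough size sanity check: a transaction carrying more payload bytes than its entire gas allowance can never pay its per-byte data cost, so it is rejected up front (reusing `TransactionError::LimitReached` rather than adding a new variant). Restated std-only:

    // More payload bytes than gas means the data cost alone is unpayable.
    fn is_obviously_oversized(data_len: usize, gas: u64) -> bool {
        data_len as u64 > gas
    }

    fn demo() {
        assert!(is_obviously_oversized(100_001, 100_000));
        assert!(!is_obviously_oversized(5, 100_000));
    }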
// The transaction might be old, let's check that. // The transaction might be old, let's check that.
// This has to be the first test, otherwise calculating // This has to be the first test, otherwise calculating
// nonce height would result in overflow. // nonce height would result in overflow.
@ -970,6 +1007,7 @@ mod test {
} }
fn default_nonce() -> U256 { 123.into() } fn default_nonce() -> U256 { 123.into() }
fn default_gas_val() -> U256 { 100_000.into() }
fn default_gas_price() -> U256 { 1.into() } fn default_gas_price() -> U256 { 1.into() }
fn new_unsigned_tx(nonce: U256, gas_price: U256) -> Transaction { fn new_unsigned_tx(nonce: U256, gas_price: U256) -> Transaction {
@ -977,7 +1015,7 @@ mod test {
action: Action::Create, action: Action::Create,
value: U256::from(100), value: U256::from(100),
data: "3331600055".from_hex().unwrap(), data: "3331600055".from_hex().unwrap(),
gas: U256::from(100_000), gas: default_gas_val(),
gas_price: gas_price, gas_price: gas_price,
nonce: nonce nonce: nonce
} }
@ -1042,7 +1080,7 @@ mod test {
#[test] #[test]
fn should_return_correct_nonces_when_dropped_because_of_limit() { fn should_return_correct_nonces_when_dropped_because_of_limit() {
// given // given
let mut txq = TransactionQueue::with_limits(2, !U256::zero()); let mut txq = TransactionQueue::with_limits(2, !U256::zero(), !U256::zero());
let (tx1, tx2) = new_tx_pair(123.into(), 1.into(), 1.into(), 0.into()); let (tx1, tx2) = new_tx_pair(123.into(), 1.into(), 1.into(), 0.into());
let sender = tx1.sender().unwrap(); let sender = tx1.sender().unwrap();
let nonce = tx1.nonce; let nonce = tx1.nonce;
@ -1080,7 +1118,8 @@ mod test {
by_priority: BTreeSet::new(), by_priority: BTreeSet::new(),
by_address: Table::new(), by_address: Table::new(),
by_gas_price: Default::default(), by_gas_price: Default::default(),
limit: 1 limit: 1,
gas_limit: !U256::zero(),
}; };
let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into());
let tx1 = VerifiedTransaction::new(tx1, TransactionOrigin::External).unwrap(); let tx1 = VerifiedTransaction::new(tx1, TransactionOrigin::External).unwrap();
@ -1120,7 +1159,8 @@ mod test {
by_priority: BTreeSet::new(), by_priority: BTreeSet::new(),
by_address: Table::new(), by_address: Table::new(),
by_gas_price: Default::default(), by_gas_price: Default::default(),
limit: 1 limit: 1,
gas_limit: !U256::zero(),
}; };
// Create two transactions with same nonce // Create two transactions with same nonce
// (same hash) // (same hash)
@ -1168,7 +1208,8 @@ mod test {
by_priority: BTreeSet::new(), by_priority: BTreeSet::new(),
by_address: Table::new(), by_address: Table::new(),
by_gas_price: Default::default(), by_gas_price: Default::default(),
limit: 2 limit: 2,
gas_limit: !U256::zero(),
}; };
let tx = new_tx_default(); let tx = new_tx_default();
let tx1 = VerifiedTransaction::new(tx.clone(), TransactionOrigin::External).unwrap(); let tx1 = VerifiedTransaction::new(tx.clone(), TransactionOrigin::External).unwrap();
@ -1185,7 +1226,8 @@ mod test {
by_priority: BTreeSet::new(), by_priority: BTreeSet::new(),
by_address: Table::new(), by_address: Table::new(),
by_gas_price: Default::default(), by_gas_price: Default::default(),
limit: 1 limit: 1,
gas_limit: !U256::zero(),
}; };
assert_eq!(set.gas_price_entry_limit(), 0.into()); assert_eq!(set.gas_price_entry_limit(), 0.into());
@ -1463,6 +1505,36 @@ mod test {
assert_eq!(top.len(), 2); assert_eq!(top.len(), 2);
} }
#[test]
fn should_penalize_transactions_from_sender_in_future() {
// given
let prev_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance: !U256::zero() };
let mut txq = TransactionQueue::new();
// txa, txb - slightly bigger gas price to have consistent ordering
let (txa, txb) = new_tx_pair_default(1.into(), 0.into());
let (tx1, tx2) = new_tx_pair_with_gas_price_increment(3.into());
// insert everything
txq.add(txa.clone(), &prev_nonce, TransactionOrigin::External).unwrap();
txq.add(txb.clone(), &prev_nonce, TransactionOrigin::External).unwrap();
txq.add(tx1.clone(), &prev_nonce, TransactionOrigin::External).unwrap();
txq.add(tx2.clone(), &prev_nonce, TransactionOrigin::External).unwrap();
assert_eq!(txq.status().future, 4);
// when
txq.penalize(&tx1.hash());
// then
let top = txq.future_transactions();
assert_eq!(top[0], txa);
assert_eq!(top[1], txb);
assert_eq!(top[2], tx1);
assert_eq!(top[3], tx2);
assert_eq!(top.len(), 4);
}
#[test] #[test]
fn should_penalize_transactions_from_sender() { fn should_penalize_transactions_from_sender() {
// given // given
@ -1651,7 +1723,7 @@ mod test {
#[test] #[test]
fn should_drop_old_transactions_when_hitting_the_limit() { fn should_drop_old_transactions_when_hitting_the_limit() {
// given // given
let mut txq = TransactionQueue::with_limits(1, !U256::zero()); let mut txq = TransactionQueue::with_limits(1, !U256::zero(), !U256::zero());
let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); let (tx, tx2) = new_tx_pair_default(1.into(), 0.into());
let sender = tx.sender().unwrap(); let sender = tx.sender().unwrap();
let nonce = tx.nonce; let nonce = tx.nonce;
@ -1672,7 +1744,7 @@ mod test {
#[test] #[test]
fn should_limit_future_transactions() { fn should_limit_future_transactions() {
let mut txq = TransactionQueue::with_limits(1, !U256::zero()); let mut txq = TransactionQueue::with_limits(1, !U256::zero(), !U256::zero());
txq.current.set_limit(10); txq.current.set_limit(10);
let (tx1, tx2) = new_tx_pair_default(4.into(), 1.into()); let (tx1, tx2) = new_tx_pair_default(4.into(), 1.into());
let (tx3, tx4) = new_tx_pair_default(4.into(), 2.into()); let (tx3, tx4) = new_tx_pair_default(4.into(), 2.into());
@ -1689,6 +1761,30 @@ mod test {
assert_eq!(txq.status().future, 1); assert_eq!(txq.status().future, 1);
} }
#[test]
fn should_limit_by_gas() {
let mut txq = TransactionQueue::with_limits(100, default_gas_val() * U256::from(2), !U256::zero());
let (tx1, tx2) = new_tx_pair_default(U256::from(1), U256::from(1));
let (tx3, tx4) = new_tx_pair_default(U256::from(1), U256::from(2));
txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).ok();
txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).ok();
txq.add(tx3.clone(), &default_account_details, TransactionOrigin::External).ok();
txq.add(tx4.clone(), &default_account_details, TransactionOrigin::External).ok();
assert_eq!(txq.status().pending, 2);
}
#[test]
fn should_keep_own_transactions_above_gas_limit() {
let mut txq = TransactionQueue::with_limits(100, default_gas_val() * U256::from(2), !U256::zero());
let (tx1, tx2) = new_tx_pair_default(U256::from(1), U256::from(1));
let (tx3, tx4) = new_tx_pair_default(U256::from(1), U256::from(2));
txq.add(tx1.clone(), &default_account_details, TransactionOrigin::Local).unwrap();
txq.add(tx2.clone(), &default_account_details, TransactionOrigin::Local).unwrap();
txq.add(tx3.clone(), &default_account_details, TransactionOrigin::Local).unwrap();
txq.add(tx4.clone(), &default_account_details, TransactionOrigin::Local).unwrap();
assert_eq!(txq.status().pending, 4);
}
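Worked numbers for the two tests above: with `default_gas_val()` at 100,000 the queue cap is 200,000 gas, so the four external transactions get trimmed to the two highest-priority ones, while all four local transactions stay, exercising the exemption in `enforce_limit`.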
#[test] #[test]
fn should_drop_transactions_with_old_nonces() { fn should_drop_transactions_with_old_nonces() {
let mut txq = TransactionQueue::new(); let mut txq = TransactionQueue::new();
@ -1932,7 +2028,7 @@ mod test {
#[test] #[test]
fn should_keep_right_order_in_future() { fn should_keep_right_order_in_future() {
// given // given
let mut txq = TransactionQueue::with_limits(1, !U256::zero()); let mut txq = TransactionQueue::with_limits(1, !U256::zero(), !U256::zero());
let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into());
let prev_nonce = |a: &Address| AccountDetails { nonce: default_account_details(a).nonce - U256::one(), balance: let prev_nonce = |a: &Address| AccountDetails { nonce: default_account_details(a).nonce - U256::one(), balance:
default_account_details(a).balance }; default_account_details(a).balance };
View File
@ -51,7 +51,7 @@ use rand::{Rng, OsRng};
pub use self::error::Error; pub use self::error::Error;
pub use self::service::{Service, DatabaseRestore}; pub use self::service::{Service, DatabaseRestore};
pub use self::traits::{SnapshotService, RemoteSnapshotService}; pub use self::traits::SnapshotService;
pub use self::watcher::Watcher; pub use self::watcher::Watcher;
pub use types::snapshot_manifest::ManifestData; pub use types::snapshot_manifest::ManifestData;
pub use types::restoration_status::RestorationStatus; pub use types::restoration_status::RestorationStatus;
@ -67,6 +67,12 @@ mod watcher;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
/// IPC interfaces
#[cfg(feature="ipc")]
pub mod remote {
pub use super::traits::RemoteSnapshotService;
}
mod traits { mod traits {
#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/snapshot_service_trait.rs")); include!(concat!(env!("OUT_DIR"), "/snapshot_service_trait.rs"));
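Downstream IPC users now reach the remote trait through the new `remote` module. A hypothetical import after this change (crate path assumed):

    // Only compiled when the crate is built with the `ipc` feature.
    #[cfg(feature = "ipc")]
    use ethcore::snapshot::remote::RemoteSnapshotService;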
View File
@ -16,7 +16,6 @@
//! Single account in the system. //! Single account in the system.
use std::collections::hash_map::Entry;
use util::*; use util::*;
use pod_account::*; use pod_account::*;
use rlp::*; use rlp::*;
@ -24,9 +23,11 @@ use lru_cache::LruCache;
use std::cell::{RefCell, Cell}; use std::cell::{RefCell, Cell};
const STORAGE_CACHE_ITEMS: usize = 4096; const STORAGE_CACHE_ITEMS: usize = 8192;
/// Single account in the system. /// Single account in the system.
/// Keeps track of changes to the code and storage.
/// The changes are applied in `commit_storage` and `commit_code`
pub struct Account { pub struct Account {
// Balance of the account. // Balance of the account.
balance: U256, balance: U256,
@ -46,8 +47,6 @@ pub struct Account {
code_size: Option<usize>, code_size: Option<usize>,
// Code cache of the account. // Code cache of the account.
code_cache: Arc<Bytes>, code_cache: Arc<Bytes>,
// Account is new or has been modified.
filth: Filth,
// Account code new or has been modified. // Account code new or has been modified.
code_filth: Filth, code_filth: Filth,
// Cached address hash. // Cached address hash.
@ -67,7 +66,6 @@ impl Account {
code_hash: code.sha3(), code_hash: code.sha3(),
code_size: Some(code.len()), code_size: Some(code.len()),
code_cache: Arc::new(code), code_cache: Arc::new(code),
filth: Filth::Dirty,
code_filth: Filth::Dirty, code_filth: Filth::Dirty,
address_hash: Cell::new(None), address_hash: Cell::new(None),
} }
@ -89,7 +87,6 @@ impl Account {
code_filth: Filth::Dirty, code_filth: Filth::Dirty,
code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())), code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())),
code_cache: Arc::new(pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c)), code_cache: Arc::new(pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c)),
filth: Filth::Dirty,
address_hash: Cell::new(None), address_hash: Cell::new(None),
} }
} }
@ -105,7 +102,6 @@ impl Account {
code_hash: SHA3_EMPTY, code_hash: SHA3_EMPTY,
code_cache: Arc::new(vec![]), code_cache: Arc::new(vec![]),
code_size: Some(0), code_size: Some(0),
filth: Filth::Dirty,
code_filth: Filth::Clean, code_filth: Filth::Clean,
address_hash: Cell::new(None), address_hash: Cell::new(None),
} }
@ -123,7 +119,6 @@ impl Account {
code_hash: r.val_at(3), code_hash: r.val_at(3),
code_cache: Arc::new(vec![]), code_cache: Arc::new(vec![]),
code_size: None, code_size: None,
filth: Filth::Clean,
code_filth: Filth::Clean, code_filth: Filth::Clean,
address_hash: Cell::new(None), address_hash: Cell::new(None),
} }
@ -141,7 +136,6 @@ impl Account {
code_hash: SHA3_EMPTY, code_hash: SHA3_EMPTY,
code_cache: Arc::new(vec![]), code_cache: Arc::new(vec![]),
code_size: None, code_size: None,
filth: Filth::Dirty,
code_filth: Filth::Clean, code_filth: Filth::Clean,
address_hash: Cell::new(None), address_hash: Cell::new(None),
} }
@ -153,7 +147,6 @@ impl Account {
self.code_hash = code.sha3(); self.code_hash = code.sha3();
self.code_cache = Arc::new(code); self.code_cache = Arc::new(code);
self.code_size = Some(self.code_cache.len()); self.code_size = Some(self.code_cache.len());
self.filth = Filth::Dirty;
self.code_filth = Filth::Dirty; self.code_filth = Filth::Dirty;
} }
@ -164,17 +157,7 @@ impl Account {
/// Set (and cache) the contents of the trie's storage at `key` to `value`. /// Set (and cache) the contents of the trie's storage at `key` to `value`.
pub fn set_storage(&mut self, key: H256, value: H256) { pub fn set_storage(&mut self, key: H256, value: H256) {
match self.storage_changes.entry(key) { self.storage_changes.insert(key, value);
Entry::Occupied(ref mut entry) if entry.get() != &value => {
entry.insert(value);
self.filth = Filth::Dirty;
},
Entry::Vacant(entry) => {
entry.insert(value);
self.filth = Filth::Dirty;
},
_ => {},
}
} }
/// Get (and cache) the contents of the trie's storage at `key`. /// Get (and cache) the contents of the trie's storage at `key`.
@ -263,17 +246,6 @@ impl Account {
!self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == SHA3_EMPTY) !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == SHA3_EMPTY)
} }
/// Is this a new or modified account?
pub fn is_dirty(&self) -> bool {
self.filth == Filth::Dirty || self.code_filth == Filth::Dirty || !self.storage_is_clean()
}
/// Mark account as clean.
pub fn set_clean(&mut self) {
assert!(self.storage_is_clean());
self.filth = Filth::Clean
}
/// Provide a database to get `code_hash`. Should not be called if it is a contract without code. /// Provide a database to get `code_hash`. Should not be called if it is a contract without code.
pub fn cache_code(&mut self, db: &HashDB) -> bool { pub fn cache_code(&mut self, db: &HashDB) -> bool {
// TODO: fill out self.code_cache; // TODO: fill out self.code_cache;
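With `is_dirty`/`set_clean` gone, `Account` no longer tracks its own dirtiness at all; that bookkeeping moves to the `AccountState` flag on `AccountEntry` in state.rs later in this diff.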
@ -326,25 +298,18 @@ impl Account {
/// Increment the nonce of the account by one. /// Increment the nonce of the account by one.
pub fn inc_nonce(&mut self) { pub fn inc_nonce(&mut self) {
self.nonce = self.nonce + U256::from(1u8); self.nonce = self.nonce + U256::from(1u8);
self.filth = Filth::Dirty;
} }
/// Increment the nonce of the account by one. /// Increase account balance.
pub fn add_balance(&mut self, x: &U256) { pub fn add_balance(&mut self, x: &U256) {
if !x.is_zero() {
self.balance = self.balance + *x; self.balance = self.balance + *x;
self.filth = Filth::Dirty;
}
} }
/// Increment the nonce of the account by one. /// Decrease account balance.
/// Panics if balance is less than `x` /// Panics if balance is less than `x`
pub fn sub_balance(&mut self, x: &U256) { pub fn sub_balance(&mut self, x: &U256) {
if !x.is_zero() {
assert!(self.balance >= *x); assert!(self.balance >= *x);
self.balance = self.balance - *x; self.balance = self.balance - *x;
self.filth = Filth::Dirty;
}
} }
/// Commit the `storage_changes` to the backing DB and update `storage_root`. /// Commit the `storage_changes` to the backing DB and update `storage_root`.
@ -406,7 +371,6 @@ impl Account {
code_hash: self.code_hash.clone(), code_hash: self.code_hash.clone(),
code_size: self.code_size.clone(), code_size: self.code_size.clone(),
code_cache: self.code_cache.clone(), code_cache: self.code_cache.clone(),
filth: self.filth,
code_filth: self.code_filth, code_filth: self.code_filth,
address_hash: self.address_hash.clone(), address_hash: self.address_hash.clone(),
} }
@ -427,10 +391,10 @@ impl Account {
account account
} }
/// Replace self with the data from other account merging storage cache /// Replace self with the data from other account merging storage cache.
pub fn merge_with(&mut self, other: Account) { /// Basic account data and all modifications are overwritten
assert!(self.storage_is_clean()); /// with new values.
assert!(other.storage_is_clean()); pub fn overwrite_with(&mut self, other: Account) {
self.balance = other.balance; self.balance = other.balance;
self.nonce = other.nonce; self.nonce = other.nonce;
self.storage_root = other.storage_root; self.storage_root = other.storage_root;
@ -443,6 +407,7 @@ impl Account {
for (k, v) in other.storage_cache.into_inner().into_iter() { for (k, v) in other.storage_cache.into_inner().into_iter() {
cache.insert(k.clone() , v.clone()); //TODO: cloning should not be required here cache.insert(k.clone() , v.clone()); //TODO: cloning should not be required here
} }
self.storage_changes = other.storage_changes;
} }
} }
View File
@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::cell::{RefCell, RefMut}; use std::cell::{RefCell, RefMut};
use std::collections::hash_map::Entry;
use common::*; use common::*;
use engines::Engine; use engines::Engine;
use executive::{Executive, TransactOptions}; use executive::{Executive, TransactOptions};
@ -42,42 +43,93 @@ pub struct ApplyOutcome {
/// Result type for the execution ("application") of a transaction. /// Result type for the execution ("application") of a transaction.
pub type ApplyResult = Result<ApplyOutcome, Error>; pub type ApplyResult = Result<ApplyOutcome, Error>;
#[derive(Debug)] #[derive(Eq, PartialEq, Clone, Copy, Debug)]
enum AccountEntry { /// Account modification state. Used to check if the account was
/// Contains account data. /// modified in between commits and overall.
Cached(Account), enum AccountState {
/// Account has been deleted. /// Account was loaded from disk and never modified in this state object.
Killed, CleanFresh,
/// Account does not exist. /// Account was loaded from the global cache and never modified.
Missing, CleanCached,
/// Account has been modified and is not committed to the trie yet.
/// This is set if any of the account data is changed, including
/// storage and code.
Dirty,
/// Account was modified and committed to the trie.
Committed,
} }
#[derive(Debug)]
/// In-memory copy of the account data. Holds the optional account
/// and the modification status.
/// Account entry can contain existing (`Some`) or non-existing
/// account (`None`)
struct AccountEntry {
account: Option<Account>,
state: AccountState,
}
// Account cache item. Contains account data and
// modification state
impl AccountEntry { impl AccountEntry {
fn is_dirty(&self) -> bool { fn is_dirty(&self) -> bool {
match *self { self.state == AccountState::Dirty
AccountEntry::Cached(ref a) => a.is_dirty(),
AccountEntry::Killed => true,
AccountEntry::Missing => false,
}
} }
/// Clone dirty data into new `AccountEntry`. /// Clone dirty data into new `AccountEntry`. This includes
/// basic account data and modified storage keys.
/// Returns None if clean. /// Returns None if clean.
fn clone_dirty(&self) -> Option<AccountEntry> { fn clone_if_dirty(&self) -> Option<AccountEntry> {
match *self { match self.is_dirty() {
AccountEntry::Cached(ref acc) if acc.is_dirty() => Some(AccountEntry::Cached(acc.clone_dirty())), true => Some(self.clone_dirty()),
AccountEntry::Killed => Some(AccountEntry::Killed), false => None,
_ => None,
} }
} }
/// Clone account entry data that needs to be saved in the snapshot. /// Clone dirty data into new `AccountEntry`. This includes
/// This includes basic account information and all locally cached storage keys /// basic account data and modified storage keys.
fn clone_for_snapshot(&self) -> AccountEntry { fn clone_dirty(&self) -> AccountEntry {
match *self { AccountEntry {
AccountEntry::Cached(ref acc) => AccountEntry::Cached(acc.clone_all()), account: self.account.as_ref().map(Account::clone_dirty),
AccountEntry::Killed => AccountEntry::Killed, state: self.state,
AccountEntry::Missing => AccountEntry::Missing, }
}
// Create a new account entry and mark it as dirty.
fn new_dirty(account: Option<Account>) -> AccountEntry {
AccountEntry {
account: account,
state: AccountState::Dirty,
}
}
// Create a new account entry and mark it as clean.
fn new_clean(account: Option<Account>) -> AccountEntry {
AccountEntry {
account: account,
state: AccountState::CleanFresh,
}
}
// Create a new account entry and mark it as clean and cached.
fn new_clean_cached(account: Option<Account>) -> AccountEntry {
AccountEntry {
account: account,
state: AccountState::CleanCached,
}
}
// Replace data with another entry but preserve storage cache.
fn overwrite_with(&mut self, other: AccountEntry) {
self.state = other.state;
match other.account {
Some(acc) => match self.account {
Some(ref mut ours) => {
ours.overwrite_with(acc);
},
None => {},
},
None => self.account = None,
} }
} }
} }
@ -90,6 +142,9 @@ impl AccountEntry {
/// locally from previous commits. Global cache reflects the database /// locally from previous commits. Global cache reflects the database
/// state and never contains any changes. /// state and never contains any changes.
/// ///
/// Cache items contain account data, or the flag that account does not exist
/// and modification state (see `AccountState`)
///
/// Account data can be in the following cache states: /// Account data can be in the following cache states:
/// * In global but not local - something that was queried from the database, /// * In global but not local - something that was queried from the database,
/// but never modified /// but never modified
@ -103,12 +158,32 @@ impl AccountEntry {
/// then global state cache. If data is not found in any of the caches /// then global state cache. If data is not found in any of the caches
/// it is loaded from the DB to the local cache. /// it is loaded from the DB to the local cache.
/// ///
/// Upon destruction all the local cache data merged into the global cache. /// **** IMPORTANT *************************************************************
/// The merge might be rejected if current state is non-canonical. /// All the modifications to the account data must set the `Dirty` state in the
/// `AccountEntry`. This is done in `require` and `require_or_from`. So just
/// use that.
/// ****************************************************************************
///
/// Upon destruction all the local cache data propagated into the global cache.
/// Propagated items might be rejected if current state is non-canonical.
///
/// State snapshotting.
///
/// A new snapshot can be created with `snapshot()`. Snapshots can be
/// created in a hierarchy.
/// When a snapshot is active all changes are applied directly into
/// `cache` and the original value is copied into an active snapshot.
/// Reverting a snapshot with `revert_to_snapshot` involves copying
/// original values from the latest snapshot back into `cache`. The code
/// takes care not to overwrite cached storage while doing that.
/// Snapshot can be discarded with `discard_snapshot`. All of the original
/// backed-up values are moved into a parent snapshot (if any).
///
pub struct State { pub struct State {
db: StateDB, db: StateDB,
root: H256, root: H256,
cache: RefCell<HashMap<Address, AccountEntry>>, cache: RefCell<HashMap<Address, AccountEntry>>,
// The original account entry is preserved in the snapshot.
snapshots: RefCell<Vec<HashMap<Address, Option<AccountEntry>>>>, snapshots: RefCell<Vec<HashMap<Address, Option<AccountEntry>>>>,
account_start_nonce: U256, account_start_nonce: U256,
factories: Factories, factories: Factories,
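The snapshot discipline described above is exactly what the two tests at the end of this file exercise; condensed (setup elided, method names from this diff):

    fn demo(state: &mut State, a: Address) {
        state.snapshot();                       // checkpoint 1
        state.add_balance(&a, &U256::from(69u64));
        state.snapshot();                       // nested checkpoint 2
        state.add_balance(&a, &U256::from(1u64));
        state.revert_to_snapshot();             // undo the +1
        state.discard_snapshot();               // keep the +69
        assert_eq!(state.balance(&a), U256::from(69u64));
    }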
@ -162,35 +237,48 @@ impl State {
Ok(state) Ok(state)
} }
/// Create a recoverable snapshot of this state /// Create a recoverable snapshot of this state.
pub fn snapshot(&mut self) { pub fn snapshot(&mut self) {
self.snapshots.borrow_mut().push(HashMap::new()); self.snapshots.borrow_mut().push(HashMap::new());
} }
/// Merge last snapshot with previous /// Merge last snapshot with previous.
pub fn clear_snapshot(&mut self) { pub fn discard_snapshot(&mut self) {
// merge with previous snapshot // merge with previous snapshot
let last = self.snapshots.borrow_mut().pop(); let last = self.snapshots.borrow_mut().pop();
if let Some(mut snapshot) = last { if let Some(mut snapshot) = last {
if let Some(ref mut prev) = self.snapshots.borrow_mut().last_mut() { if let Some(ref mut prev) = self.snapshots.borrow_mut().last_mut() {
if prev.is_empty() {
**prev = snapshot;
} else {
for (k, v) in snapshot.drain() { for (k, v) in snapshot.drain() {
prev.entry(k).or_insert(v); prev.entry(k).or_insert(v);
} }
} }
} }
} }
}
/// Revert to snapshot /// Revert to the last snapshot and discard it.
pub fn revert_snapshot(&mut self) { pub fn revert_to_snapshot(&mut self) {
if let Some(mut snapshot) = self.snapshots.borrow_mut().pop() { if let Some(mut snapshot) = self.snapshots.borrow_mut().pop() {
for (k, v) in snapshot.drain() { for (k, v) in snapshot.drain() {
match v { match v {
Some(v) => { Some(v) => {
self.cache.borrow_mut().insert(k, v); match self.cache.borrow_mut().entry(k) {
Entry::Occupied(mut e) => {
// Merge snapshotted changes back into the main account
// storage preserving the cache.
e.get_mut().overwrite_with(v);
},
Entry::Vacant(e) => {
e.insert(v);
}
}
}, },
None => { None => {
match self.cache.borrow_mut().entry(k) { match self.cache.borrow_mut().entry(k) {
::std::collections::hash_map::Entry::Occupied(e) => { Entry::Occupied(e) => {
if e.get().is_dirty() { if e.get().is_dirty() {
e.remove(); e.remove();
} }
@ -204,26 +292,33 @@ impl State {
} }
fn insert_cache(&self, address: &Address, account: AccountEntry) { fn insert_cache(&self, address: &Address, account: AccountEntry) {
// Dirty account which is not in the cache means this is a new account.
// It goes directly into the snapshot as there's nothing to revert to.
//
// In all other cases account is read as clean first, and after that made
// dirty and added to the snapshot with `note_cache`.
if account.is_dirty() {
if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
if !snapshot.contains_key(address) { if !snapshot.contains_key(address) {
snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account)); snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account));
return; return;
} }
} }
}
self.cache.borrow_mut().insert(address.clone(), account); self.cache.borrow_mut().insert(address.clone(), account);
} }
fn note_cache(&self, address: &Address) { fn note_cache(&self, address: &Address) {
if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
if !snapshot.contains_key(address) { if !snapshot.contains_key(address) {
snapshot.insert(address.clone(), self.cache.borrow().get(address).map(AccountEntry::clone_for_snapshot)); snapshot.insert(address.clone(), self.cache.borrow().get(address).map(AccountEntry::clone_dirty));
} }
} }
} }
/// Destroy the current object and return root and database. /// Destroy the current object and return root and database.
pub fn drop(mut self) -> (H256, StateDB) { pub fn drop(mut self) -> (H256, StateDB) {
self.commit_cache(); self.propagate_to_global_cache();
(self.root, self.db) (self.root, self.db)
} }
@ -235,12 +330,12 @@ impl State {
/// Create a new contract at address `contract`. If there is already an account at the address /// Create a new contract at address `contract`. If there is already an account at the address
/// it will have its code reset, ready for `init_code()`. /// it will have its code reset, ready for `init_code()`.
pub fn new_contract(&mut self, contract: &Address, balance: U256) { pub fn new_contract(&mut self, contract: &Address, balance: U256) {
self.insert_cache(contract, AccountEntry::Cached(Account::new_contract(balance, self.account_start_nonce))); self.insert_cache(contract, AccountEntry::new_dirty(Some(Account::new_contract(balance, self.account_start_nonce))));
} }
/// Remove an existing account. /// Remove an existing account.
pub fn kill_account(&mut self, account: &Address) { pub fn kill_account(&mut self, account: &Address) {
self.insert_cache(account, AccountEntry::Killed); self.insert_cache(account, AccountEntry::new_dirty(None));
} }
/// Determine whether an account exists. /// Determine whether an account exists.
@ -272,8 +367,8 @@ impl State {
let local_cache = self.cache.borrow_mut(); let local_cache = self.cache.borrow_mut();
let mut local_account = None; let mut local_account = None;
if let Some(maybe_acc) = local_cache.get(address) { if let Some(maybe_acc) = local_cache.get(address) {
match *maybe_acc { match maybe_acc.account {
AccountEntry::Cached(ref account) => { Some(ref account) => {
if let Some(value) = account.cached_storage_at(key) { if let Some(value) = account.cached_storage_at(key) {
return value; return value;
} else { } else {
@ -292,7 +387,7 @@ impl State {
return result; return result;
} }
if let Some(ref mut acc) = local_account { if let Some(ref mut acc) = local_account {
if let AccountEntry::Cached(ref account) = **acc { if let Some(ref account) = acc.account {
let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(address)); let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(address));
return account.storage_at(account_db.as_hashdb(), key) return account.storage_at(account_db.as_hashdb(), key)
} else { } else {
@ -314,10 +409,7 @@ impl State {
let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address));
a.storage_at(account_db.as_hashdb(), key) a.storage_at(account_db.as_hashdb(), key)
}); });
match maybe_acc { self.insert_cache(address, AccountEntry::new_clean(maybe_acc));
Some(account) => self.insert_cache(address, AccountEntry::Cached(account)),
None => self.insert_cache(address, AccountEntry::Missing),
}
r r
} }
@ -341,14 +433,18 @@ impl State {
/// Add `incr` to the balance of account `a`. /// Add `incr` to the balance of account `a`.
pub fn add_balance(&mut self, a: &Address, incr: &U256) { pub fn add_balance(&mut self, a: &Address, incr: &U256) {
trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)); trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a));
if !incr.is_zero() || !self.exists(a) {
self.require(a, false).add_balance(incr); self.require(a, false).add_balance(incr);
} }
}
/// Subtract `decr` from the balance of account `a`. /// Subtract `decr` from the balance of account `a`.
pub fn sub_balance(&mut self, a: &Address, decr: &U256) { pub fn sub_balance(&mut self, a: &Address, decr: &U256) {
trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)); trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a));
if !decr.is_zero() || !self.exists(a) {
self.require(a, false).sub_balance(decr); self.require(a, false).sub_balance(decr);
} }
}
/// Subtracts `by` from the balance of `from` and adds it to that of `to`. /// Subtracts `by` from the balance of `from` and adds it to that of `to`.
pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256) { pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256) {
@ -363,8 +459,10 @@ impl State {
/// Mutate storage of account `a` so that it is `value` for `key`. /// Mutate storage of account `a` so that it is `value` for `key`.
pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) { pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) {
if self.storage_at(a, &key) != value {
self.require(a, false).set_storage(key, value) self.require(a, false).set_storage(key, value)
} }
}
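All three mutators above (`add_balance`, `sub_balance`, `set_storage`) now reach `require`, which marks the entry `Dirty`, only when something actually changes or the account is not yet known; no-op writes no longer dirty clean cache entries.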
/// Initialise the code of account `a` so that it is `code`. /// Initialise the code of account `a` so that it is `code`.
/// NOTE: Account should have been created with `new_contract`. /// NOTE: Account should have been created with `new_contract`.
@ -404,10 +502,9 @@ impl State {
accounts: &mut HashMap<Address, AccountEntry> accounts: &mut HashMap<Address, AccountEntry>
) -> Result<(), Error> { ) -> Result<(), Error> {
// first, commit the sub trees. // first, commit the sub trees.
// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) {
for (address, ref mut a) in accounts.iter_mut() { match a.account {
match a { Some(ref mut account) => {
&mut&mut AccountEntry::Cached(ref mut account) if account.is_dirty() => {
db.note_account_bloom(&address); db.note_account_bloom(&address);
let addr_hash = account.address_hash(address); let addr_hash = account.address_hash(address);
let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash); let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash);
@ -420,17 +517,15 @@ impl State {
{ {
let mut trie = factories.trie.from_existing(db.as_hashdb_mut(), root).unwrap(); let mut trie = factories.trie.from_existing(db.as_hashdb_mut(), root).unwrap();
for (address, ref mut a) in accounts.iter_mut() { for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) {
match **a { a.state = AccountState::Committed;
AccountEntry::Cached(ref mut account) if account.is_dirty() => { match a.account {
account.set_clean(); Some(ref mut account) => {
try!(trie.insert(address, &account.rlp())); try!(trie.insert(address, &account.rlp()));
}, },
AccountEntry::Killed => { None => {
try!(trie.remove(address)); try!(trie.remove(address));
**a = AccountEntry::Missing;
}, },
_ => {},
} }
} }
} }
@ -438,20 +533,12 @@ impl State {
Ok(()) Ok(())
} }
fn commit_cache(&mut self) { /// Propagate local cache into shared canonical state cache.
fn propagate_to_global_cache(&mut self) {
let mut addresses = self.cache.borrow_mut(); let mut addresses = self.cache.borrow_mut();
for (address, a) in addresses.drain() { trace!("Committing cache {:?} entries", addresses.len());
match a { for (address, a) in addresses.drain().filter(|&(_, ref a)| a.state == AccountState::Committed || a.state == AccountState::CleanFresh) {
AccountEntry::Cached(account) => { self.db.add_to_account_cache(address, a.account, a.state == AccountState::Committed);
if !account.is_dirty() {
self.db.cache_account(address, Some(account));
}
},
AccountEntry::Missing => {
self.db.cache_account(address, None);
},
_ => {},
}
} }
} }
@ -473,7 +560,7 @@ impl State {
assert!(self.snapshots.borrow().is_empty()); assert!(self.snapshots.borrow().is_empty());
for (add, acc) in accounts.drain().into_iter() { for (add, acc) in accounts.drain().into_iter() {
self.db.note_account_bloom(&add); self.db.note_account_bloom(&add);
self.cache.borrow_mut().insert(add, AccountEntry::Cached(Account::from_pod(acc))); self.cache.borrow_mut().insert(add, AccountEntry::new_dirty(Some(Account::from_pod(acc))));
} }
} }
@ -483,7 +570,7 @@ impl State {
// TODO: handle database rather than just the cache. // TODO: handle database rather than just the cache.
// will need fat db. // will need fat db.
PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
if let AccountEntry::Cached(ref acc) = *opt { if let Some(ref acc) = opt.account {
m.insert(add.clone(), PodAccount::from_account(acc)); m.insert(add.clone(), PodAccount::from_account(acc));
} }
m m
@ -530,7 +617,7 @@ impl State {
where F: Fn(Option<&Account>) -> U { where F: Fn(Option<&Account>) -> U {
// check local cache first // check local cache first
if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) {
if let AccountEntry::Cached(ref mut account) = **maybe_acc { if let Some(ref mut account) = maybe_acc.account {
let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a));
Self::update_account_cache(require, account, accountdb.as_hashdb()); Self::update_account_cache(require, account, accountdb.as_hashdb());
return f(Some(account)); return f(Some(account));
@ -562,10 +649,7 @@ impl State {
Self::update_account_cache(require, account, accountdb.as_hashdb()); Self::update_account_cache(require, account, accountdb.as_hashdb());
} }
let r = f(maybe_acc.as_ref()); let r = f(maybe_acc.as_ref());
match maybe_acc { self.insert_cache(a, AccountEntry::new_clean(maybe_acc));
Some(account) => self.insert_cache(a, AccountEntry::Cached(account)),
None => self.insert_cache(a, AccountEntry::Missing),
}
r r
} }
} }
@ -584,36 +668,38 @@ impl State {
let contains_key = self.cache.borrow().contains_key(a); let contains_key = self.cache.borrow().contains_key(a);
if !contains_key { if !contains_key {
match self.db.get_cached_account(a) { match self.db.get_cached_account(a) {
Some(Some(acc)) => self.insert_cache(a, AccountEntry::Cached(acc)), Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)),
Some(None) => self.insert_cache(a, AccountEntry::Missing),
None => { None => {
let maybe_acc = if self.db.check_account_bloom(a) { let maybe_acc = if self.db.check_account_bloom(a) {
let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
let maybe_acc = match db.get(a) { let maybe_acc = match db.get(a) {
Ok(Some(acc)) => AccountEntry::Cached(Account::from_rlp(acc)), Ok(Some(acc)) => AccountEntry::new_clean(Some(Account::from_rlp(acc))),
Ok(None) => AccountEntry::Missing, Ok(None) => AccountEntry::new_clean(None),
Err(e) => panic!("Potential DB corruption encountered: {}", e), Err(e) => panic!("Potential DB corruption encountered: {}", e),
}; };
maybe_acc maybe_acc
} }
else { else {
AccountEntry::Missing AccountEntry::new_clean(None)
}; };
self.insert_cache(a, maybe_acc); self.insert_cache(a, maybe_acc);
} }
} }
} else { }
self.note_cache(a); self.note_cache(a);
match &mut self.cache.borrow_mut().get_mut(a).unwrap().account {
&mut Some(ref mut acc) => not_default(acc),
slot => *slot = Some(default()),
} }
match self.cache.borrow_mut().get_mut(a).unwrap() { // at this point the account is guaranteed to be in the cache.
&mut AccountEntry::Cached(ref mut acc) => not_default(acc),
slot => *slot = AccountEntry::Cached(default()),
}
RefMut::map(self.cache.borrow_mut(), |c| { RefMut::map(self.cache.borrow_mut(), |c| {
match c.get_mut(a).unwrap() { let mut entry = c.get_mut(a).unwrap();
&mut AccountEntry::Cached(ref mut account) => { // set the dirty flag after changing account data.
entry.state = AccountState::Dirty;
match entry.account {
Some(ref mut account) => {
if require_code { if require_code {
let addr_hash = account.address_hash(a); let addr_hash = account.address_hash(a);
let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash);
@ -638,7 +724,7 @@ impl Clone for State {
let cache = { let cache = {
let mut cache: HashMap<Address, AccountEntry> = HashMap::new(); let mut cache: HashMap<Address, AccountEntry> = HashMap::new();
for (key, val) in self.cache.borrow().iter() { for (key, val) in self.cache.borrow().iter() {
if let Some(entry) = val.clone_dirty() { if let Some(entry) = val.clone_if_dirty() {
cache.insert(key.clone(), entry); cache.insert(key.clone(), entry);
} }
} }
@ -1679,12 +1765,12 @@ fn snapshot_basic() {
state.snapshot(); state.snapshot();
state.add_balance(&a, &U256::from(69u64)); state.add_balance(&a, &U256::from(69u64));
assert_eq!(state.balance(&a), U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64));
state.clear_snapshot(); state.discard_snapshot();
assert_eq!(state.balance(&a), U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64));
state.snapshot(); state.snapshot();
state.add_balance(&a, &U256::from(1u64)); state.add_balance(&a, &U256::from(1u64));
assert_eq!(state.balance(&a), U256::from(70u64)); assert_eq!(state.balance(&a), U256::from(70u64));
state.revert_snapshot(); state.revert_to_snapshot();
assert_eq!(state.balance(&a), U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64));
} }
@ -1697,9 +1783,9 @@ fn snapshot_nested() {
state.snapshot(); state.snapshot();
state.add_balance(&a, &U256::from(69u64)); state.add_balance(&a, &U256::from(69u64));
assert_eq!(state.balance(&a), U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64));
state.clear_snapshot(); state.discard_snapshot();
assert_eq!(state.balance(&a), U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64));
state.revert_snapshot(); state.revert_to_snapshot();
assert_eq!(state.balance(&a), U256::from(0)); assert_eq!(state.balance(&a), U256::from(0));
} }
View File
@ -14,56 +14,94 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{VecDeque, HashSet};
use lru_cache::LruCache; use lru_cache::LruCache;
use util::journaldb::JournalDB; use util::journaldb::JournalDB;
use util::hash::{H256}; use util::hash::{H256};
use util::hashdb::HashDB; use util::hashdb::HashDB;
use state::Account; use state::Account;
use header::BlockNumber;
use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable}; use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable};
use bloom_journal::{Bloom, BloomJournal}; use bloom_journal::{Bloom, BloomJournal};
use db::COL_ACCOUNT_BLOOM; use db::COL_ACCOUNT_BLOOM;
use byteorder::{LittleEndian, ByteOrder}; use byteorder::{LittleEndian, ByteOrder};
const STATE_CACHE_ITEMS: usize = 65536; const STATE_CACHE_ITEMS: usize = 256000;
const STATE_CACHE_BLOCKS: usize = 8;
pub const ACCOUNT_BLOOM_SPACE: usize = 1048576; pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000; pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;
pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count"; pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";
/// Shared canonical state cache.
struct AccountCache { struct AccountCache {
/// DB Account cache. `None` indicates that account is known to be missing. /// DB Account cache. `None` indicates that account is known to be missing.
accounts: LruCache<Address, Option<Account>>, accounts: LruCache<Address, Option<Account>>,
/// Information on the modifications in recently committed blocks; specifically which addresses
/// changed in which block. Ordered by block number.
modifications: VecDeque<BlockChanges>,
}
/// Buffered account cache item.
struct CacheQueueItem {
/// Account address.
address: Address,
/// Account data or `None` if account does not exist.
account: Option<Account>,
/// Indicates that the account was modified before being
/// added to the cache.
modified: bool,
}
#[derive(Debug)]
/// Accumulates a list of accounts changed in a block.
struct BlockChanges {
/// Block number.
number: BlockNumber,
/// Block hash.
hash: H256,
/// Parent block hash.
parent: H256,
/// A set of modified account addresses.
accounts: HashSet<Address>,
/// Block is part of the canonical chain.
is_canon: bool,
} }
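`sync_cache` below walks this deque on every commit: any address recorded by a block that is being enacted or retracted gets evicted, since the shared cache may only mirror canonical state. A std-only model of that eviction rule:

    use std::collections::HashSet;

    // Each entry is (block number, addresses modified in that block).
    fn to_evict(changes: &[(u64, Vec<&'static str>)], reorged: &[u64]) -> HashSet<&'static str> {
        changes.iter()
            .filter(|(n, _)| reorged.contains(n))
            .flat_map(|(_, accs)| accs.iter().copied())
            .collect()
    }

    fn demo() {
        let changes = vec![(7u64, vec!["a1", "a2"]), (8, vec!["a3"])];
        let evict = to_evict(&changes, &[8]);
        assert!(evict.contains("a3") && !evict.contains("a1"));
    }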
/// State database abstraction. /// State database abstraction.
/// Manages shared global state cache. /// Manages shared global state cache which reflects the canonical
/// state as it is on the disk. All the entries in the cache are clean.
/// A clone of `StateDB` may be created as canonical or not. /// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones cache changes are accumulated and applied /// For canonical clones local cache is accumulated and applied
/// on commit. /// in `sync_cache`.
/// For non-canonical clones cache is cleared on commit. /// For non-canonical clones local cache is dropped.
///
/// Global cache propagation.
/// After a `State` object has been committed to the trie it
/// propagates its local cache into the `StateDB` local cache
/// using `add_to_account_cache` function.
/// Then, after the block has been added to the chain the local cache in the
/// `StateDB` is propagated into the global cache.
pub struct StateDB { pub struct StateDB {
/// Backing database.
db: Box<JournalDB>, db: Box<JournalDB>,
/// Shared canonical state cache.
account_cache: Arc<Mutex<AccountCache>>, account_cache: Arc<Mutex<AccountCache>>,
cache_overlay: Vec<(Address, Option<Account>)>, /// Local dirty cache.
is_canon: bool, local_cache: Vec<CacheQueueItem>,
/// Shared account bloom. Does not handle chain reorganizations.
account_bloom: Arc<Mutex<Bloom>>, account_bloom: Arc<Mutex<Bloom>>,
/// Hash of the block on top of which this instance was created or
/// `None` if cache is disabled
parent_hash: Option<H256>,
/// Hash of the committing block or `None` if not committed yet.
commit_hash: Option<H256>,
/// Number of the committing block or `None` if not committed yet.
commit_number: Option<BlockNumber>,
} }
impl StateDB { impl StateDB {
/// Create a new instance wrapping `JournalDB`
pub fn new(db: Box<JournalDB>) -> StateDB {
let bloom = Self::load_bloom(db.backing());
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache { accounts: LruCache::new(STATE_CACHE_ITEMS) })),
cache_overlay: Vec::new(),
is_canon: false,
account_bloom: Arc::new(Mutex::new(bloom)),
}
}
/// Loads accounts bloom from the database /// Loads accounts bloom from the database
/// This bloom is used to handle requests for non-existent accounts fast /// This bloom is used to handle requests for non-existent accounts fast
pub fn load_bloom(db: &Database) -> Bloom { pub fn load_bloom(db: &Database) -> Bloom {
@ -91,6 +129,23 @@ impl StateDB {
bloom bloom
} }
/// Create a new instance wrapping `JournalDB`
pub fn new(db: Box<JournalDB>) -> StateDB {
let bloom = Self::load_bloom(db.backing());
StateDB {
db: db,
account_cache: Arc::new(Mutex::new(AccountCache {
accounts: LruCache::new(STATE_CACHE_ITEMS),
modifications: VecDeque::new(),
})),
local_cache: Vec::new(),
account_bloom: Arc::new(Mutex::new(bloom)),
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
pub fn check_account_bloom(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock();
@@ -125,14 +180,107 @@ impl StateDB {
try!(Self::commit_bloom(batch, bloom_lock.drain_journal()));
}
let records = try!(self.db.commit(batch, now, id, end));
self.commit_hash = Some(id.clone());
self.commit_number = Some(now);
Ok(records)
}
/// Propagate local cache into the global cache and synchronize
/// the global cache with the best block state.
/// This function updates the global cache by removing entries
/// that are invalidated by chain reorganization. `sync_cache`
/// should be called after the block has been committed and the
/// blockchain route has been calculated.
pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) {
trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best);
let mut cache = self.account_cache.lock();
let mut cache = &mut *cache;
// Purge changes from re-enacted and retracted blocks.
// Filter out the committing block, if any.
let mut clear = false;
for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p)) {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Reverting enacted block {:?}", block);
m.is_canon = true;
for a in &m.accounts {
trace!("Reverting enacted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
for block in retracted {
clear = clear || {
if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) {
trace!("Retracting block {:?}", block);
m.is_canon = false;
for a in &m.accounts {
trace!("Retracted address {:?}", a);
cache.accounts.remove(a);
}
false
} else {
true
}
};
}
if clear {
// We don't know anything about the block; clear everything
trace!("Wiping cache");
cache.accounts.clear();
cache.modifications.clear();
}
// Propagate cache only if committing on top of the latest canonical state
// blocks are ordered by number and only one block with a given number is marked as canonical
// (contributed to canonical state cache)
if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) {
if cache.modifications.len() == STATE_CACHE_BLOCKS {
cache.modifications.pop_back();
}
let mut modifications = HashSet::new();
trace!("committing {} cache entries", self.local_cache.len());
for account in self.local_cache.drain(..) {
if account.modified {
modifications.insert(account.address.clone());
}
if is_best {
if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) {
if let Some(new) = account.account {
if account.modified {
existing.overwrite_with(new);
}
continue;
}
}
cache.accounts.insert(account.address, account.account);
}
}
// Save modified accounts. These are ordered by the block number.
let block_changes = BlockChanges {
accounts: modifications,
number: *number,
hash: hash.clone(),
is_canon: is_best,
parent: parent.clone(),
};
let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i);
trace!("inserting modifications at {:?}", insert_at);
if let Some(insert_at) = insert_at {
cache.modifications.insert(insert_at, block_changes);
} else {
cache.modifications.push_back(block_changes);
}
}
}
/// Returns an interface to HashDB.
pub fn as_hashdb(&self) -> &HashDB {
self.db.as_hashdb()
@@ -148,20 +296,24 @@ impl StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
parent_hash: None,
commit_hash: None,
commit_number: None,
}
}
/// Clone the database for a canonical state.
pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB {
StateDB {
db: self.db.boxed_clone(),
account_cache: self.account_cache.clone(),
local_cache: Vec::new(),
account_bloom: self.account_bloom.clone(),
parent_hash: Some(parent.clone()),
commit_hash: None,
commit_number: None,
}
}
@@ -180,53 +332,149 @@ impl StateDB {
&*self.db
}
/// Add a local cache entry.
/// The entry will be propagated to the global cache in `sync_cache`.
/// `modified` indicates that the entry was changed since being read from disk or the global cache.
/// `data` can be set to an existing (`Some`) or a non-existing account (`None`).
pub fn add_to_account_cache(&mut self, addr: Address, data: Option<Account>, modified: bool) {
self.local_cache.push(CacheQueueItem {
address: addr,
account: data,
modified: modified,
})
}
/// Get basic copy of the cached account. Does not include storage.
/// Returns `None` if cache is disabled or if the account is not cached.
pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
let mut cache = self.account_cache.lock();
if !Self::is_allowed(addr, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
}
/// Get value from a cached account.
/// Returns `None` if cache is disabled or if the account is not cached.
pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U {
let mut cache = self.account_cache.lock();
if !Self::is_allowed(a, &self.parent_hash, &cache.modifications) {
return None;
}
cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
}
/// Check if the account can be returned from cache by matching current block parent hash against canonical
/// state and filtering out accounts modified in later blocks.
fn is_allowed(addr: &Address, parent_hash: &Option<H256>, modifications: &VecDeque<BlockChanges>) -> bool {
let mut parent = match *parent_hash {
None => {
trace!("Cache lookup skipped for {:?}: no parent hash", addr);
return false;
}
Some(ref parent) => parent,
};
if modifications.is_empty() {
return true;
}
// Ignore all accounts modified in later blocks.
// Modifications are ordered by block number.
// We search for our parent in that list first and then for
// all its parents until we hit the canonical block,
// checking against all the intermediate modifications.
let mut iter = modifications.iter();
while let Some(ref m) = iter.next() {
if &m.hash == parent {
if m.is_canon {
return true;
}
parent = &m.parent;
}
if m.accounts.contains(addr) {
trace!("Cache lookup skipped for {:?}: modified in a later block", addr);
return false;
}
}
trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr);
return false;
}
}
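Because the ancestry walk above is easy to misread, here is a self-contained toy model of the same filtering rules, runnable on its own. Everything in it is hypothetical scaffolding (u64 stands in for H256, &str for Address, and Change for BlockChanges); it mirrors the logic only and is not this module's code:

use std::collections::{HashSet, VecDeque};

struct Change { hash: u64, parent: u64, is_canon: bool, accounts: HashSet<&'static str> }

// Mirrors `is_allowed`: `mods` holds the newest blocks first.
fn allowed(addr: &str, mut parent: u64, mods: &VecDeque<Change>) -> bool {
    if mods.is_empty() { return true; }
    for m in mods {
        if m.hash == parent {
            if m.is_canon { return true; }
            parent = m.parent;
        }
        if m.accounts.contains(addr) { return false; }
    }
    false
}

fn main() {
    // Two siblings on top of canonical block 0x00: canonical 0x1a (untouched)
    // and branch 0x1b, which modified "acct".
    let mods: VecDeque<Change> = vec![
        Change { hash: 0x1b, parent: 0x00, is_canon: false, accounts: vec!["acct"].into_iter().collect() },
        Change { hash: 0x1a, parent: 0x00, is_canon: true, accounts: HashSet::new() },
    ].into();
    assert!(allowed("other", 0x1a, &mods));   // never modified: the global cache is usable
    assert!(!allowed("acct", 0x1b, &mods));   // parent branch is non-canonical and touched it
    assert!(!allowed("acct", 0x1a, &mods));   // touched on a sibling branch: stay conservative
}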
#[cfg(test)]
mod tests {
use util::{U256, H256, FixedHash, Address, DBTransaction};
use tests::helpers::*;
use state::Account;
use util::log::init_log;
#[test]
fn state_db_smoke() {
init_log();
let mut state_db_result = get_temp_state_db();
let state_db = state_db_result.take();
let root_parent = H256::random();
let address = Address::random();
let h0 = H256::random();
let h1a = H256::random();
let h1b = H256::random();
let h2a = H256::random();
let h2b = H256::random();
let h3a = H256::random();
let h3b = H256::random();
let mut batch = DBTransaction::new(state_db.journal_db().backing());
// blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ]
// balance [ 5 5 4 3 2 2 ]
let mut s = state_db.boxed_clone_canon(&root_parent);
s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false);
s.commit(&mut batch, 0, &h0, None).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.commit(&mut batch, 1, &h1a, None).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h0);
s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true);
s.commit(&mut batch, 1, &h1b, None).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1b);
s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true);
s.commit(&mut batch, 2, &h2b, None).unwrap();
s.sync_cache(&[], &[], false);
let mut s = state_db.boxed_clone_canon(&h1a);
s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true);
s.commit(&mut batch, 2, &h2a, None).unwrap();
s.sync_cache(&[], &[], true);
let mut s = state_db.boxed_clone_canon(&h2a);
s.commit(&mut batch, 3, &h3a, None).unwrap();
s.sync_cache(&[], &[], true);
let s = state_db.boxed_clone_canon(&h3a);
assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5));
let s = state_db.boxed_clone_canon(&h1a);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h2b);
assert!(s.get_cached_account(&address).is_none());
let s = state_db.boxed_clone_canon(&h1b);
assert!(s.get_cached_account(&address).is_none());
// reorg to 3b
// blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ]
let mut s = state_db.boxed_clone_canon(&h2b);
s.commit(&mut batch, 3, &h3b, None).unwrap();
s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true);
let s = state_db.boxed_clone_canon(&h3a);
assert!(s.get_cached_account(&address).is_none());
}
}

View File

@@ -57,7 +57,11 @@ fn should_return_registrar() {
IoChannel::disconnected(),
&db_config
).unwrap();
let params = client.additional_params();
let address = params.get("registrar").unwrap();
assert_eq!(address.len(), 40);
assert!(U256::from_str(address).is_ok());
}
#[test]

View File

@@ -16,4 +16,5 @@
pub mod helpers;
mod client;
#[cfg(feature="ipc")]
mod rpc;

View File

@@ -19,7 +19,8 @@
use nanoipc;
use std::sync::Arc;
use std::sync::atomic::{Ordering, AtomicBool};
use client::{Client, BlockChainClient, ClientConfig, BlockID};
use client::remote::RemoteClient;
use tests::helpers::*;
use devtools::*;
use miner::Miner;

View File

@@ -256,16 +256,6 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
return;
}
// now let's rebuild the blooms
if !request.enacted.is_empty() {
let range_start = request.block_number as Number + 1 - request.enacted.len();
@@ -276,12 +266,25 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
// all traces are expected to be found here. That's why `expect` has been used
// instead of `filter_map`. If some traces haven't been found, it means that
// the traces database is corrupted or incomplete.
.map(|block_hash| if block_hash == &request.block_hash {
request.traces.bloom()
} else {
self.traces(block_hash).expect("Traces database is incomplete.").bloom()
})
.map(blooms::Bloom::from)
.map(Into::into)
.collect();
// insert new block traces into the cache and the database
{
let mut traces = self.traces.write();
// it's important to use overwrite here,
// because this value might be queried by hash later
batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
// note_used must be called after locking traces to avoid a cache/traces deadlock on garbage collection
self.note_used(CacheID::Trace(request.block_hash.clone()));
}
let chain = BloomGroupChain::new(self.bloom_config, self);
let trace_blooms = chain.replace(&replaced_range, enacted_blooms);
let blooms_to_insert = trace_blooms.into_iter()

View File

@@ -22,7 +22,7 @@ use client::BlockID;
use log_entry::LogEntry;
/// Blockchain Filter.
#[derive(Binary, Debug, PartialEq)]
pub struct Filter {
/// Blockchain will be searched from this block.
pub from_block: BlockID,

View File

@@ -76,15 +76,14 @@ impl DiskDirectory {
.map(|entry| entry.path())
.collect::<Vec<PathBuf>>();
paths
.iter()
.map(|p| (
fs::File::open(p)
.map_err(Error::from)
.and_then(|r| json::KeyFile::load(r).map_err(|e| Error::Custom(format!("{:?}", e)))),
p
))
.map(|(file, path)| match file {
Ok(file) => Ok((path.clone(), SafeAccount::from_file(
file, Some(path.file_name().and_then(|n| n.to_str()).expect("Keys have valid UTF8 names only.").to_owned())

View File

@@ -56,7 +56,7 @@ pub fn expand(src: &std::path::Path, dst: &std::path::Path) {
}
#[cfg(feature = "with-syntex")]
pub fn register_cleaner(reg: &mut syntex::Registry) {
use syntax::{ast, fold};
#[cfg(feature = "with-syntex")]
@@ -66,6 +66,7 @@ pub fn register(reg: &mut syntex::Registry) {
fn fold_attribute(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
match attr.node.value.node {
ast::MetaItemKind::List(ref n, _) if n == &"ipc" => { return None; }
ast::MetaItemKind::Word(ref n) if n == &"ipc" => { return None; }
_ => {}
}
@@ -80,13 +81,18 @@ pub fn register(reg: &mut syntex::Registry) {
fold::Folder::fold_crate(&mut StripAttributeFolder, krate)
}
reg.add_post_expansion_pass(strip_attributes);
}
#[cfg(feature = "with-syntex")]
pub fn register(reg: &mut syntex::Registry) {
reg.add_attr("feature(custom_derive)");
reg.add_attr("feature(custom_attribute)");
reg.add_decorator("ipc", codegen::expand_ipc_implementation);
reg.add_decorator("derive_Binary", serialization::expand_serialization_implementation);
register_cleaner(reg);
}
#[cfg(not(feature = "with-syntex"))]
@@ -104,7 +110,34 @@ pub fn register(reg: &mut rustc_plugin::Registry) {
}
#[derive(Debug)]
pub enum Error { InvalidFileName, ExpandFailure, Io(std::io::Error) }
impl std::convert::From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::Io(err)
}
}
pub fn derive_ipc_cond(src_path: &str, has_feature: bool) -> Result<(), Error> {
if has_feature { derive_ipc(src_path) }
else { cleanup_ipc(src_path) }
}
pub fn cleanup_ipc(src_path: &str) -> Result<(), Error> {
use std::env;
use std::path::{Path, PathBuf};
let out_dir = env::var_os("OUT_DIR").unwrap();
let file_name = try!(PathBuf::from(src_path).file_name().ok_or(Error::InvalidFileName).map(|val| val.to_str().unwrap().to_owned()));
let mut registry = syntex::Registry::new();
register_cleaner(&mut registry);
if let Err(_) = registry.expand("", &Path::new(src_path), &Path::new(&out_dir).join(&file_name))
{
// will be reported by compiler
return Err(Error::ExpandFailure)
}
Ok(())
}
pub fn derive_ipc(src_path: &str) -> Result<(), Error> {
use std::env;
@@ -113,11 +146,11 @@ pub fn derive_ipc(src_path: &str) -> Result<(), Error> {
let out_dir = env::var_os("OUT_DIR").unwrap();
let file_name = try!(PathBuf::from(src_path).file_name().ok_or(Error::InvalidFileName).map(|val| val.to_str().unwrap().to_owned()));
let final_path = Path::new(&out_dir).join(&file_name);
let mut intermediate_file_name = file_name.clone();
intermediate_file_name.push_str(".rpc.in");
let intermediate_path = Path::new(&out_dir).join(&intermediate_file_name);
{
let mut registry = syntex::Registry::new();

View File

@@ -13,6 +13,7 @@ nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
ethcore-ipc-nano = { path = "../nano" }
semver = "0.2"
log = "0.3"
time = "0.1"
[build-dependencies]
ethcore-ipc-codegen = { path = "../codegen" }

View File

@@ -22,6 +22,7 @@ extern crate ethcore_ipc as ipc;
extern crate ethcore_ipc_nano as nanoipc;
extern crate semver;
#[macro_use] extern crate log;
extern crate time;
pub mod service;
@@ -187,23 +188,40 @@ impl Hypervisor {
}
/// Waits for every required module to check in
pub fn wait_for_shutdown(&self) -> bool {
use time::{PreciseTime, Duration};
let mut worker = self.ipc_worker.write().unwrap();
let start = PreciseTime::now();
while !self.modules_shutdown() {
worker.poll();
if start.to(PreciseTime::now()) > Duration::seconds(30) {
warn!("Some modules failed to shutdown gracefully, they will be terminated.");
break;
}
}
self.modules_shutdown()
}
/// Shutdown the ipc and all managed child processes
pub fn shutdown(&self) {
let mut childs = self.processes.write().unwrap();
for (ref module, _) in childs.iter() {
trace!(target: "hypervisor", "Stopping process module: {}", module);
self.service.send_shutdown(**module);
}
trace!(target: "hypervisor", "Waiting for shutdown...");
if self.wait_for_shutdown() {
trace!(target: "hypervisor", "All modules reported shutdown");
return;
}
for (ref module, ref mut process) in childs.iter_mut() {
if self.service.is_running(**module) {
process.kill().unwrap();
trace!("Terminated {}", module);
}
}
}
}

View File

@@ -39,7 +39,6 @@ pub struct ModuleState {
shutdown: bool,
}
pub trait ControlService {
fn shutdown(&self) -> bool;
@@ -106,6 +105,10 @@ impl HypervisorService {
self.modules.read().unwrap().iter().filter(|&(_, module)| module.started && !module.shutdown).count()
}
pub fn is_running(&self, id: IpcModuleId) -> bool {
self.modules.read().unwrap().get(&id).map(|module| module.started && !module.shutdown).unwrap_or(false)
}
pub fn send_shutdown(&self, module_id: IpcModuleId) {
let modules = self.modules.read().unwrap();
modules.get(&module_id).map(|module| {

View File

@@ -67,7 +67,8 @@ usd_per_eth = "auto"
price_update_period = "hourly"
gas_floor_target = "4700000"
gas_cap = "6283184"
tx_queue_size = 2048
tx_queue_gas = "auto"
tx_gas_limit = "6283184"
extra_data = "Parity"
remove_solved = false

View File

@@ -41,6 +41,7 @@ reseal_on_txs = "all"
reseal_min_period = 4000
price_update_period = "hourly"
tx_queue_size = 2048
tx_queue_gas = "auto"
[footprint]
tracing = "on"

View File

@@ -193,8 +193,10 @@ usage! {
or |c: &Config| otry!(c.mining).gas_cap.clone(),
flag_extra_data: Option<String> = None,
or |c: &Config| otry!(c.mining).extra_data.clone().map(Some),
flag_tx_queue_size: usize = 2048usize,
or |c: &Config| otry!(c.mining).tx_queue_size.clone(),
flag_tx_queue_gas: String = "auto",
or |c: &Config| otry!(c.mining).tx_queue_gas.clone(),
flag_remove_solved: bool = false,
or |c: &Config| otry!(c.mining).remove_solved.clone(),
flag_notify_work: Option<String> = None,
@@ -348,6 +350,7 @@ struct Mining {
gas_cap: Option<String>,
extra_data: Option<String>,
tx_queue_size: Option<usize>,
tx_queue_gas: Option<String>,
remove_solved: Option<bool>,
notify_work: Option<Vec<String>>,
}
@@ -522,7 +525,8 @@ mod tests {
flag_gas_floor_target: "4700000".into(),
flag_gas_cap: "6283184".into(),
flag_extra_data: Some("Parity".into()),
flag_tx_queue_size: 2048usize,
flag_tx_queue_gas: "auto".into(),
flag_remove_solved: false,
flag_notify_work: Some("http://localhost:3001".into()),
@@ -673,6 +677,7 @@ mod tests {
gas_floor_target: None,
gas_cap: None,
tx_queue_size: Some(2048),
tx_queue_gas: Some("auto".into()),
tx_gas_limit: None,
extra_data: None,
remove_solved: None,

View File

@@ -44,7 +44,8 @@ Account Options:
ACCOUNTS is a comma-delimited list of addresses.
Implies --no-signer. (default: {flag_unlock:?})
--password FILE          Provide a file containing a password for unlocking
an account. Leading and trailing whitespace is trimmed.
(default: {flag_password:?})
--keys-iterations NUM    Specify the number of iterations to use when
deriving key from the password (bigger is more
secure) (default: {flag_keys_iterations}).
@@ -183,6 +184,10 @@ Sealing/Mining Options:
more than 32 characters. (default: {flag_extra_data:?})
--tx-queue-size LIMIT    Maximum amount of transactions in the queue (waiting
to be included in next block) (default: {flag_tx_queue_size}).
--tx-queue-gas LIMIT     Maximum amount of total gas for external transactions in
the queue. LIMIT can be either an amount of gas or
'auto' or 'off'. 'auto' sets the limit to be 2x
the current block gas limit. (default: {flag_tx_queue_gas}).
--remove-solved          Move solved blocks from the work package queue
instead of cloning them. This gives a slightly
faster import speed, but means that extra solutions

View File

@@ -30,7 +30,7 @@ use rpc::{IpcConfiguration, HttpConfiguration};
use ethcore_rpc::NetworkSettings;
use cache::CacheConfig;
use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home,
geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_gas_limit};
use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, SpecType};
use ethcore_logger::Config as LogConfig;
use dir::Directories;
@@ -348,6 +348,7 @@ impl Configuration {
None => U256::max_value(),
},
tx_queue_size: self.args.flag_tx_queue_size,
tx_queue_gas_limit: try!(to_gas_limit(&self.args.flag_tx_queue_gas)),
pending_set: try!(to_pending_set(&self.args.flag_relay_set)),
reseal_min_period: Duration::from_millis(self.args.flag_reseal_min_period),
work_queue_size: self.args.flag_work_queue_size,

View File

@@ -22,7 +22,7 @@ use std::fs::File;
use util::{clean_0x, U256, Uint, Address, path, CompactionProfile};
use util::journaldb::Algorithm;
use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig};
use ethcore::miner::{PendingSet, GasLimit};
use cache::CacheConfig;
use dir::DatabaseDirectories;
use upgrade::upgrade;
@@ -93,6 +93,14 @@ pub fn to_pending_set(s: &str) -> Result<PendingSet, String> {
}
}
pub fn to_gas_limit(s: &str) -> Result<GasLimit, String> {
match s {
"auto" => Ok(GasLimit::Auto),
"off" => Ok(GasLimit::None),
other => Ok(GasLimit::Fixed(try!(to_u256(other)))),
}
}
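To make the three accepted forms concrete, here is a hedged sketch of how a caller might render the parser's output. It is an illustration, not code from this commit; `describe_tx_queue_gas` is a hypothetical helper, and the meanings in the strings are taken from the CLI help above:

fn describe_tx_queue_gas(s: &str) -> String {
    match to_gas_limit(s) {
        Ok(GasLimit::Auto) => "cap total queue gas at 2x the current block gas limit".into(),
        Ok(GasLimit::None) => "no total gas cap on the queue".into(),
        Ok(GasLimit::Fixed(limit)) => format!("fixed cap of {} gas", limit),
        Err(e) => format!("rejected: {}", e),
    }
}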
pub fn to_address(s: Option<String>) -> Result<Address, String> {
match s {
Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)),
@@ -273,9 +281,10 @@ pub fn password_prompt() -> Result<String, String> {
pub fn password_from_file<P>(path: P) -> Result<String, String> where P: AsRef<Path> {
let mut file = try!(File::open(path).map_err(|_| "Unable to open password file."));
let mut file_content = String::new();
match file.read_to_string(&mut file_content) {
Ok(_) => Ok(file_content.trim().into()),
Err(_) => Err("Unable to read password file.".into()),
}
}
/// Reads passwords from files. Treats each line as a separate password.
@@ -294,10 +303,13 @@ pub fn passwords_from_files(files: Vec<String>) -> Result<Vec<String>, String> {
#[cfg(test)]
mod tests {
use std::time::Duration;
use std::fs::File;
use std::io::Write;
use devtools::RandomTempPath;
use util::{U256};
use ethcore::client::{Mode, BlockID};
use ethcore::miner::PendingSet;
use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_addresses, to_price, geth_ipc_path, to_bootnodes, password_from_file};
#[test]
fn test_to_duration() {
@@ -380,6 +392,14 @@ mod tests {
);
}
#[test]
fn test_password() {
let path = RandomTempPath::new();
let mut file = File::create(path.as_path()).unwrap();
file.write_all(b"a bc ").unwrap();
assert_eq!(password_from_file(path).unwrap().as_bytes(), b"a bc");
}
#[test]
#[cfg_attr(feature = "dev", allow(float_cmp))]
fn test_to_price() {

View File

@@ -196,6 +196,9 @@ fn sync_main() -> bool {
}
fn main() {
// Always print backtrace on panic.
::std::env::set_var("RUST_BACKTRACE", "1");
if sync_main() {
return;
}

View File

@@ -68,8 +68,9 @@ pub type SyncModules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>)
#[cfg(feature="ipc")]
mod ipc_deps {
pub use ethsync::remote::{SyncClient, NetworkManagerClient};
pub use ethsync::ServiceConfiguration;
pub use ethcore::client::remote::ChainNotifyClient;
pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL};
pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client};
pub use ipc::IpcSocket;

View File

@@ -206,7 +206,7 @@ impl Default for MinerExtras {
extra_data: version_data(),
gas_floor_target: U256::from(4_700_000),
gas_ceil_target: U256::from(6_283_184),
transactions_limit: 2048,
}
}
}

View File

@@ -19,8 +19,9 @@
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL, ControlService};
use ethcore::client::ChainNotify;
use ethcore::client::remote::RemoteClient;
use ethcore::snapshot::remote::RemoteSnapshotService;
use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration};
use modules::service_urls;
use boot;

View File

@@ -21,7 +21,7 @@ macro_rules! rpc_unimplemented {
}
use std::fmt;
use ethcore::error::{Error as EthcoreError, CallError};
use ethcore::account_provider::{Error as AccountError};
use fetch::FetchError;
use jsonrpc_core::{Error, ErrorCode, Value};
@@ -34,6 +34,7 @@ mod codes {
pub const NO_NEW_WORK: i64 = -32003;
pub const UNKNOWN_ERROR: i64 = -32009;
pub const TRANSACTION_ERROR: i64 = -32010;
pub const EXECUTION_ERROR: i64 = -32015;
pub const ACCOUNT_LOCKED: i64 = -32020;
pub const PASSWORD_INVALID: i64 = -32021;
pub const ACCOUNT_ERROR: i64 = -32023;
@@ -109,6 +110,14 @@ pub fn invalid_params<T: fmt::Debug>(param: &str, details: T) -> Error {
}
}
pub fn execution<T: fmt::Debug>(data: T) -> Error {
Error {
code: ErrorCode::ServerError(codes::EXECUTION_ERROR),
message: "Transaction execution error.".into(),
data: Some(Value::String(format!("{:?}", data))),
}
}
pub fn state_pruned() -> Error {
Error {
code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST),
@@ -189,13 +198,13 @@ pub fn from_transaction_error(error: EthcoreError) -> Error {
AlreadyImported => "Transaction with the same hash was already imported.".into(),
Old => "Transaction nonce is too low. Try incrementing the nonce.".into(),
TooCheapToReplace => {
"Transaction gas price is too low. There is another transaction with same nonce in the queue. Try increasing the gas price or incrementing the nonce.".into()
},
LimitReached => {
"There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee.".into()
},
InsufficientGasPrice { minimal, got } => {
format!("Transaction gas price is too low. It does not satisfy your node's minimal gas price (minimal: {}, got: {}). Try increasing the gas price.", minimal, got)
},
InsufficientBalance { balance, cost } => {
format!("Insufficient funds. Account you try to send transaction from does not have enough funds. Required {} and got: {}.", cost, balance)
@@ -219,4 +228,10 @@ pub fn from_transaction_error(error: EthcoreError) -> Error {
}
}
pub fn from_call_error(error: CallError) -> Error {
match error {
CallError::StatePruned => state_pruned(),
CallError::Execution(e) => execution(e),
CallError::TransactionNotFound => internal("{}, this should not be the case with eth_call, most likely a bug.", CallError::TransactionNotFound),
}
}
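For context, this is how a handler can funnel a failed call through the helper above; a minimal sketch mirroring the `eth_call` change later in this commit (the `Executed` and `Bytes` types are the ones used there, not defined in this file, and `call_output_or_rpc_error` is a hypothetical name):

fn call_output_or_rpc_error(result: Result<Executed, CallError>) -> Result<Bytes, Error> {
    result
        .map(|executed| Bytes(executed.output))   // success: return the call output
        .map_err(from_call_error)                 // failure: translate into a JSON-RPC error
}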

View File

@@ -33,7 +33,7 @@ use util::{FromHex, Mutex};
use rlp::{self, UntrustedRlp, View};
use ethcore::account_provider::AccountProvider;
use ethcore::client::{MiningBlockChainClient, BlockID, TransactionID, UncleID};
use ethcore::header::{Header as BlockHeader, BlockNumber as EthBlockNumber};
use ethcore::block::IsBlock;
use ethcore::views::*;
use ethcore::ethereum::Ethash;
@@ -198,8 +198,8 @@ impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
}
}
pub fn pending_logs<M>(miner: &M, best_block: EthBlockNumber, filter: &EthcoreFilter) -> Vec<Log> where M: MinerService {
let receipts = miner.pending_receipts(best_block);
let pending_logs = receipts.into_iter()
.flat_map(|(hash, r)| r.logs.into_iter().map(|l| (hash.clone(), l)).collect::<Vec<(H256, LogEntry)>>())
@@ -426,7 +426,8 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
try!(self.active());
let hash: H256 = hash.into();
let miner = take_weak!(self.miner);
let client = take_weak!(self.client);
Ok(try!(self.transaction(TransactionID::Hash(hash))).or_else(|| miner.transaction(client.chain_info().best_block_number, &hash).map(Into::into)))
}
fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result<Option<Transaction>, Error> {
@@ -445,8 +446,9 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
try!(self.active());
let miner = take_weak!(self.miner);
let best_block = take_weak!(self.client).chain_info().best_block_number;
let hash: H256 = hash.into();
match (miner.pending_receipt(best_block, &hash), self.options.allow_pending_receipt_query) {
(Some(receipt), true) => Ok(Some(receipt.into())),
_ => {
let client = take_weak!(self.client);
@@ -488,7 +490,8 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
.collect::<Vec<Log>>();
if include_pending {
let best_block = take_weak!(self.client).chain_info().best_block_number;
let pending = pending_logs(&*take_weak!(self.miner), best_block, &filter);
logs.extend(pending);
}
@@ -590,7 +593,10 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
num => take_weak!(self.client).call(&signed, num.into(), Default::default()),
};
match r {
Ok(b) => Ok(Bytes(b.output)),
Err(e) => Err(errors::from_call_error(e)),
}
}
fn estimate_gas(&self, request: CallRequest, num: Trailing<BlockNumber>) -> Result<RpcU256, Error> {

View File

@@ -81,7 +81,8 @@ impl<C, M> EthFilter for EthFilterClient<C, M>
try!(self.active());
let mut polls = self.polls.lock();
let best_block = take_weak!(self.client).chain_info().best_block_number;
let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(best_block);
let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions));
Ok(id.into())
}
@@ -108,7 +109,8 @@ impl<C, M> EthFilter for EthFilterClient<C, M>
},
PollFilter::PendingTransaction(ref mut previous_hashes) => {
// get hashes of pending transactions
let best_block = take_weak!(self.client).chain_info().best_block_number;
let current_hashes = take_weak!(self.miner).pending_transactions_hashes(best_block);
let new_hashes =
{
@@ -149,7 +151,8 @@ impl<C, M> EthFilter for EthFilterClient<C, M>
// additionally retrieve pending logs
if include_pending {
let best_block = take_weak!(self.client).chain_info().best_block_number;
let pending_logs = pending_logs(&*take_weak!(self.miner), best_block, &filter);
// remove logs about which the client was already notified
let new_pending_logs: Vec<_> = pending_logs.iter()
@@ -190,7 +193,8 @@ impl<C, M> EthFilter for EthFilterClient<C, M>
.collect::<Vec<Log>>();
if include_pending {
let best_block = take_weak!(self.client).chain_info().best_block_number;
logs.extend(pending_logs(&*take_weak!(self.miner), best_block, &filter));
}
let logs = limit_logs(logs, filter.limit);

View File

@@ -24,7 +24,7 @@ use ethcore::spec::{Genesis, Spec};
use ethcore::block::Block;
use ethcore::views::BlockView;
use ethcore::ethereum;
use ethcore::miner::{MinerOptions, GasPricer, MinerService, ExternalMiner, Miner, PendingSet, GasLimit};
use ethcore::account_provider::AccountProvider;
use devtools::RandomTempPath;
use util::Hashable;
@@ -58,6 +58,7 @@ fn miner_service(spec: &Spec, accounts: Arc<AccountProvider>) -> Arc<Miner> {
reseal_on_own_tx: true,
tx_queue_size: 1024,
tx_gas_limit: !U256::zero(),
tx_queue_gas_limit: GasLimit::None,
pending_set: PendingSet::SealingOrElseQueue,
reseal_min_period: Duration::from_secs(0),
work_queue_size: 50,

View File

@@ -21,6 +21,7 @@ use util::standard::*;
use ethcore::error::{Error, CallError};
use ethcore::client::{MiningBlockChainClient, Executed, CallAnalytics};
use ethcore::block::{ClosedBlock, IsBlock};
use ethcore::header::BlockNumber;
use ethcore::transaction::SignedTransaction;
use ethcore::receipt::{Receipt, RichReceipt};
use ethcore::miner::{MinerService, MinerStatus, TransactionImportResult};
@@ -162,7 +163,7 @@ impl MinerService for TestMinerService {
}
/// Returns hashes of transactions currently in pending
fn pending_transactions_hashes(&self, _best_block: BlockNumber) -> Vec<H256> {
vec![]
}
@@ -186,7 +187,7 @@ impl MinerService for TestMinerService {
Some(f(&open_block.close()))
}
fn transaction(&self, _best_block: BlockNumber, hash: &H256) -> Option<SignedTransaction> {
self.pending_transactions.lock().get(hash).cloned()
}
@@ -194,13 +195,13 @@ impl MinerService for TestMinerService {
self.pending_transactions.lock().values().cloned().collect()
}
fn pending_transactions(&self, _best_block: BlockNumber) -> Vec<SignedTransaction> {
self.pending_transactions.lock().values().cloned().collect()
}
fn pending_receipt(&self, _best_block: BlockNumber, hash: &H256) -> Option<RichReceipt> {
// Not much point implementing this since the logic is complex and the only thing it relies on is pending_receipts, which is already tested.
self.pending_receipts(0).get(hash).map(|r|
RichReceipt {
transaction_hash: Default::default(),
transaction_index: Default::default(),
@@ -212,7 +213,7 @@ impl MinerService for TestMinerService {
)
}
fn pending_receipts(&self, _best_block: BlockNumber) -> BTreeMap<H256, Receipt> {
self.pending_receipts.lock().clone()
}

View File

@@ -85,8 +85,14 @@ impl Into<EthFilter> for Filter {
VariadicValue::Null => None,
VariadicValue::Single(t) => Some(vec![t.into()]),
VariadicValue::Multiple(t) => Some(t.into_iter().map(Into::into).collect())
}).collect()).into_iter();
vec![
iter.next().unwrap_or(None),
iter.next().unwrap_or(None),
iter.next().unwrap_or(None),
iter.next().unwrap_or(None)
]
},
limit: self.limit,
}
@@ -121,6 +127,8 @@ mod tests {
use util::hash::*;
use super::*;
use v1::types::BlockNumber;
use ethcore::filter::Filter as EthFilter;
use ethcore::client::BlockID;
#[test]
fn topic_deserialization() {
@@ -148,4 +156,33 @@ mod tests {
limit: None,
});
}
#[test]
fn filter_conversion() {
let filter = Filter {
from_block: Some(BlockNumber::Earliest),
to_block: Some(BlockNumber::Latest),
address: Some(VariadicValue::Multiple(vec![])),
topics: Some(vec![
VariadicValue::Null,
VariadicValue::Single("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into()),
VariadicValue::Null,
]),
limit: None,
};
let eth_filter: EthFilter = filter.into();
assert_eq!(eth_filter, EthFilter {
from_block: BlockID::Earliest,
to_block: BlockID::Latest,
address: Some(vec![]),
topics: vec![
None,
Some(vec!["000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into()]),
None,
None,
],
limit: None,
});
}
}

View File

@@ -17,5 +17,5 @@
extern crate ethcore_ipc_codegen;
fn main() {
ethcore_ipc_codegen::derive_ipc_cond("src/api.rs", cfg!(feature="ipc")).unwrap();
}

View File

@@ -184,8 +184,8 @@ impl BlockCollection {
{
let mut blocks = Vec::new();
let mut head = self.head;
while let Some(h) = head {
head = self.parents.get(&h).cloned();
if let Some(head) = head {
match self.blocks.get(&head) {
Some(block) if block.body.is_some() => {
@@ -201,7 +201,7 @@ impl BlockCollection {
for block in blocks.drain(..) {
let mut block_rlp = RlpStream::new_list(3);
block_rlp.append_raw(&block.header, 1);
let body = Rlp::new(block.body.as_ref().expect("blocks contains only full blocks; qed"));
block_rlp.append_raw(body.at(0).as_raw(), 1);
block_rlp.append_raw(body.at(1).as_raw(), 1);
drained.push(block_rlp.out());

View File

@@ -90,7 +90,6 @@
use util::*;
use rlp::*;
use network::*;
use ethcore::views::{HeaderView, BlockView};
use ethcore::header::{BlockNumber, Header as BlockHeader};
use ethcore::client::{BlockChainClient, BlockStatus, BlockID, BlockChainInfo, BlockImportError};
@@ -123,6 +122,7 @@ const MAX_ROUND_PARENTS: usize = 32;
const MAX_NEW_HASHES: usize = 64;
const MAX_TX_TO_IMPORT: usize = 512;
const MAX_NEW_BLOCK_AGE: BlockNumber = 20;
const MAX_TRANSACTION_SIZE: usize = 300*1024;
const STATUS_PACKET: u8 = 0x00;
const NEW_BLOCK_HASHES_PACKET: u8 = 0x01;
@@ -143,7 +143,7 @@ const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13;
const SNAPSHOT_DATA_PACKET: u8 = 0x14;
const HEADERS_TIMEOUT_SEC: f64 = 15f64;
const BODIES_TIMEOUT_SEC: f64 = 10f64;
const FORK_HEADER_TIMEOUT_SEC: f64 = 3f64;
const SNAPSHOT_MANIFEST_TIMEOUT_SEC: f64 = 3f64;
const SNAPSHOT_DATA_TIMEOUT_SEC: f64 = 10f64;
@@ -249,8 +249,6 @@ struct PeerInfo {
network_id: U256,
/// Peer best block hash
latest_hash: H256,
/// Peer total difficulty if known
difficulty: Option<U256>,
/// Type of data currently being requested from peer.
@@ -395,6 +393,8 @@ impl ChainSync {
}
self.syncing_difficulty = From::from(0u64);
self.state = SyncState::Idle;
// Reactivate peers only if some progress has been made
// since the last sync round, or if starting fresh.
self.active_peers = self.peers.keys().cloned().collect();
}
@@ -406,7 +406,8 @@ impl ChainSync {
self.continue_sync(io);
}
/// Remove peer from active peer set. Peer will be reactivated on the next sync
/// round.
fn deactivate_peer(&mut self, io: &mut SyncIo, peer_id: PeerId) {
trace!(target: "sync", "Deactivating peer {}", peer_id);
self.active_peers.remove(&peer_id);
@@ -443,7 +444,6 @@ impl ChainSync {
network_id: try!(r.val_at(1)),
difficulty: Some(try!(r.val_at(2))),
latest_hash: try!(r.val_at(3)),
genesis: try!(r.val_at(4)),
asking: PeerAsking::Nothing,
asking_blocks: Vec::new(),
@@ -480,7 +480,11 @@ impl ChainSync {
}
self.peers.insert(peer_id.clone(), peer);
// Don't activate the peer immediately when searching for a common block.
// Let the current sync round complete first.
if self.state != SyncState::ChainHead {
self.active_peers.insert(peer_id.clone());
}
debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id));
if let Some((fork_block, _)) = self.fork_block {
self.request_headers_by_number(io, peer_id, fork_block, 1, 0, false, PeerAsking::ForkHeader);
@@ -496,7 +500,8 @@ impl ChainSync {
 		let confirmed = match self.peers.get_mut(&peer_id) {
 			Some(ref mut peer) if peer.asking == PeerAsking::ForkHeader => {
 				let item_count = r.item_count();
-				if item_count == 0 || (item_count == 1 && try!(r.at(0)).as_raw().sha3() == self.fork_block.unwrap().1) {
+				if item_count == 0 || (item_count == 1 &&
+					try!(r.at(0)).as_raw().sha3() == self.fork_block.expect("ForkHeader state is only entered when fork_block is some; qed").1) {
 					peer.asking = PeerAsking::Nothing;
 					if item_count == 0 {
 						trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id);
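The hunk above trades a bare `unwrap()` for an `expect(...)` whose message states why the value must be present (the trailing `qed` marks the message as a proof of an invariant rather than a hope). A minimal sketch of the convention; the `State` enum and field names are illustrative stand-ins, not the real `ChainSync` types:

```rust
#[derive(PartialEq)]
enum State { Idle, ForkHeader }

struct Sync {
    state: State,
    // Invariant: `fork_block` is Some whenever `state == State::ForkHeader`.
    fork_block: Option<(u64, [u8; 32])>,
}

impl Sync {
    fn fork_hash(&self) -> [u8; 32] {
        assert!(self.state == State::ForkHeader);
        // If this ever panics, the message points straight at the broken invariant.
        self.fork_block
            .expect("ForkHeader state is only entered when fork_block is some; qed")
            .1
    }
}

fn main() {
    let s = Sync { state: State::ForkHeader, fork_block: Some((200_000, [0u8; 32])) };
    assert_eq!(s.fork_hash(), [0u8; 32]);
    let _ = State::Idle; // keep the illustrative variant referenced
}
```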
@@ -562,7 +567,7 @@ impl ChainSync {
 				continue;
 			}
-			if self.highest_block == None || number > self.highest_block.unwrap() {
+			if self.highest_block.as_ref().map_or(true, |n| number > *n) {
 				self.highest_block = Some(number);
 			}
 			let hash = info.hash();
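The original condition `self.highest_block == None || number > self.highest_block.unwrap()` inspects the option twice and would panic if the two tests ever drifted apart; `as_ref().map_or(true, |n| number > *n)` expresses "no value yet, or a strictly higher one" as a single total expression. A dependency-free sketch:

```rust
fn main() {
    let mut highest_block: Option<u64> = None;
    for number in [5u64, 3, 9, 9] {
        // An empty option always counts as "new highest"; otherwise compare.
        if highest_block.as_ref().map_or(true, |n| number > *n) {
            highest_block = Some(number);
        }
    }
    assert_eq!(highest_block, Some(9));
}
```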
@@ -594,9 +599,9 @@ impl ChainSync {
 		}
 		if headers.is_empty() {
-			// Peer does not have any new subchain heads, deactivate it nd try with another
+			// Peer does not have any new subchain heads, deactivate it and try with another.
 			trace!(target: "sync", "{} Disabled for no data", peer_id);
-			io.disable_peer(peer_id);
+			self.deactivate_peer(io, peer_id);
 		}
 		match self.state {
 			SyncState::ChainHead => {
@@ -675,9 +680,9 @@ impl ChainSync {
 		}
 		let mut unknown = false;
 		{
-			let peer = self.peers.get_mut(&peer_id).unwrap();
-			peer.latest_hash = header.hash();
-			peer.latest_number = Some(header.number());
+			if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+				peer.latest_hash = header.hash();
+			}
 		}
 		if self.last_imported_block > header.number() && self.last_imported_block - header.number() > MAX_NEW_BLOCK_AGE {
 			trace!(target: "sync", "Ignored ancient new block {:?}", h);
@@ -770,9 +775,9 @@ impl ChainSync {
 					new_hashes.push(hash.clone());
 					if number > max_height {
 						trace!(target: "sync", "New unknown block hash {:?}", hash);
-						let peer = self.peers.get_mut(&peer_id).unwrap();
-						peer.latest_hash = hash.clone();
-						peer.latest_number = Some(number);
+						if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+							peer.latest_hash = hash.clone();
+						}
 						max_height = number;
 					}
 				},
@@ -942,7 +947,7 @@ impl ChainSync {
 			return;
 		}
 		let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = {
-			let peer = self.peers.get_mut(&peer_id).unwrap();
+			if let Some(ref peer) = self.peers.get_mut(&peer_id) {
 				if peer.asking != PeerAsking::Nothing || !peer.can_sync() {
 					return;
 				}
@@ -955,6 +960,9 @@ impl ChainSync {
 					return;
 				}
 				(peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned(), peer.snapshot_hash.as_ref().cloned())
+			} else {
+				return;
+			}
 		};
 		let chain_info = io.chain().chain_info();
 		let td = chain_info.pending_total_difficulty;
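This hunk shows the shape the commit applies throughout: borrow the peer entry with `if let`, copy out the fields the caller needs while the borrow is alive, and return early if the peer has disconnected in the meantime instead of panicking on `unwrap()`. A distilled sketch; the `Peer` fields are stand-ins for the real `PeerInfo`:

```rust
use std::collections::HashMap;

struct Peer { latest_hash: u64, difficulty: Option<u64> }

fn sync_peer(peers: &mut HashMap<usize, Peer>, peer_id: usize) {
    // Copy what we need inside a short borrow; bail out if the peer vanished.
    let (latest, difficulty) = {
        if let Some(peer) = peers.get_mut(&peer_id) {
            (peer.latest_hash, peer.difficulty)
        } else {
            return;
        }
    };
    // ... continue the sync using the copies, with the map borrow released.
    let _ = (latest, difficulty);
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert(1, Peer { latest_hash: 42, difficulty: Some(7) });
    sync_peer(&mut peers, 1);
    sync_peer(&mut peers, 2); // absent peer: early return, no panic
}
```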
@@ -1042,14 +1050,18 @@ impl ChainSync {
 		// check to see if we need to download any block bodies first
 		let needed_bodies = self.blocks.needed_bodies(MAX_BODIES_TO_REQUEST, ignore_others);
 		if !needed_bodies.is_empty() {
-			replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, needed_bodies.clone());
+			if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+				peer.asking_blocks = needed_bodies.clone();
+			}
 			self.request_bodies(io, peer_id, needed_bodies);
 			return;
 		}
 		// find subchain to download
 		if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, ignore_others) {
-			replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, vec![h.clone()]);
+			if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+				peer.asking_blocks = vec![h.clone()];
+			}
 			self.request_headers_by_hash(io, peer_id, &h, count, 0, false, PeerAsking::BlockHeaders);
 		}
 	}
@@ -1059,14 +1071,16 @@ impl ChainSync {
 		self.clear_peer_download(peer_id);
 		// find chunk data to download
 		if let Some(hash) = self.snapshot.needed_chunk() {
-			self.peers.get_mut(&peer_id).unwrap().asking_snapshot_data = Some(hash.clone());
+			if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+				peer.asking_snapshot_data = Some(hash.clone());
+			}
 			self.request_snapshot_chunk(io, peer_id, &hash);
 		}
 	}
 	/// Clear all blocks/headers marked as being downloaded by a peer.
 	fn clear_peer_download(&mut self, peer_id: PeerId) {
-		let peer = self.peers.get_mut(&peer_id).unwrap();
+		if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
 			match peer.asking {
 				PeerAsking::BlockHeaders | PeerAsking::Heads => {
 					for b in &peer.asking_blocks {
@@ -1088,6 +1102,7 @@ impl ChainSync {
 			peer.asking_blocks.clear();
 			peer.asking_snapshot_data = None;
 		}
+	}
 	fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) {
 		self.last_imported_block = number;
@@ -1211,7 +1226,7 @@ impl ChainSync {
 	/// Reset peer status after request is complete.
 	fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) -> bool {
-		let peer = self.peers.get_mut(&peer_id).unwrap();
+		if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
 			peer.expired = false;
 			if peer.asking != asking {
 				trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking);
@@ -1222,11 +1237,14 @@ impl ChainSync {
 				peer.asking = PeerAsking::Nothing;
 				true
 			}
+		} else {
+			false
+		}
 	}
 	/// Generic request sender
 	fn send_request(&mut self, sync: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) {
-		let peer = self.peers.get_mut(&peer_id).unwrap();
+		if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
 			if peer.asking != PeerAsking::Nothing {
 				warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking);
 			}
@@ -1237,6 +1255,7 @@ impl ChainSync {
 				sync.disable_peer(peer_id);
 			}
 		}
+	}
 	/// Generic packet sender
 	fn send_packet(&mut self, sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) {
@@ -1261,7 +1280,12 @@ impl ChainSync {
 		item_count = min(item_count, MAX_TX_TO_IMPORT);
 		let mut transactions = Vec::with_capacity(item_count);
 		for i in 0 .. item_count {
-			let tx = try!(r.at(i)).as_raw().to_vec();
+			let rlp = try!(r.at(i));
+			if rlp.as_raw().len() > MAX_TRANSACTION_SIZE {
+				debug!("Skipped oversized transaction of {} bytes", rlp.as_raw().len());
+				continue;
+			}
+			let tx = rlp.as_raw().to_vec();
 			transactions.push(tx);
 		}
 		io.chain().queue_transactions(transactions);
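The new `MAX_TRANSACTION_SIZE` guard drops any transaction whose raw RLP exceeds 300 KiB before it is queued, so a peer cannot stuff the import queue with oversized payloads. The same filter sketched over plain byte vectors, with the RLP machinery elided:

```rust
const MAX_TRANSACTION_SIZE: usize = 300 * 1024;

fn filter_oversized(raw_txs: Vec<Vec<u8>>) -> Vec<Vec<u8>> {
    raw_txs
        .into_iter()
        .filter(|tx| {
            if tx.len() > MAX_TRANSACTION_SIZE {
                // mirrors the debug! log in the hunk above
                eprintln!("Skipped oversized transaction of {} bytes", tx.len());
                false
            } else {
                true
            }
        })
        .collect()
}

fn main() {
    let txs = vec![vec![0u8; 100], vec![0u8; 400 * 1024]];
    assert_eq!(filter_oversized(txs).len(), 1);
}
```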
@@ -1604,7 +1628,7 @@ impl ChainSync {
 	/// creates latest block rlp for the given client
 	fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
 		let mut rlp_stream = RlpStream::new_list(2);
-		rlp_stream.append_raw(&chain.block(BlockID::Hash(chain.chain_info().best_block_hash)).unwrap(), 1);
+		rlp_stream.append_raw(&chain.block(BlockID::Hash(chain.chain_info().best_block_hash)).expect("Best block always exists"), 1);
 		rlp_stream.append(&chain.chain_info().total_difficulty);
 		rlp_stream.out()
 	}
@@ -1618,25 +1642,23 @@ impl ChainSync {
 	}
 	/// returns peer ids that have less blocks than our chain
-	fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec<(PeerId, BlockNumber)> {
+	fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec<PeerId> {
 		let latest_hash = chain_info.best_block_hash;
-		let latest_number = chain_info.best_block_number;
 		self.peers.iter_mut().filter_map(|(&id, ref mut peer_info)|
 			match io.chain().block_status(BlockID::Hash(peer_info.latest_hash.clone())) {
 				BlockStatus::InChain => {
-					if peer_info.latest_number.is_none() {
-						peer_info.latest_number = Some(HeaderView::new(&io.chain().block_header(BlockID::Hash(peer_info.latest_hash.clone())).unwrap()).number());
-					}
-					if peer_info.latest_hash != latest_hash && latest_number > peer_info.latest_number.unwrap() {
-						Some((id, peer_info.latest_number.unwrap()))
-					} else { None }
+					if peer_info.latest_hash != latest_hash {
+						Some(id)
+					} else {
+						None
+					}
 				},
 				_ => None
 			})
 		.collect::<Vec<_>>()
 	}
-	fn select_random_lagging_peers(&mut self, peers: &[(PeerId, BlockNumber)]) -> Vec<(PeerId, BlockNumber)> {
+	fn select_random_lagging_peers(&mut self, peers: &[PeerId]) -> Vec<PeerId> {
 		use rand::Rng;
 		// take sqrt(x) peers
 		let mut peers = peers.to_vec();
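With `latest_number` removed, peer selection works over plain `PeerId`s, but the policy stays the same: shuffle the lagging peers and keep about sqrt(n) of them, so announcement fan-out grows slowly with peer count. A self-contained sketch; the real code shuffles with `rand::Rng`, and the toy LCG below only keeps the example dependency-free:

```rust
fn select_random_lagging_peers(peers: &[usize], seed: u64) -> Vec<usize> {
    let mut peers = peers.to_vec();
    // Fisher-Yates shuffle driven by a toy linear congruential generator.
    let mut state = seed;
    let mut next = move || {
        state = state
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        state
    };
    for i in (1..peers.len()).rev() {
        let j = (next() % (i as u64 + 1)) as usize;
        peers.swap(i, j);
    }
    // take sqrt(x) peers, as the comment in the hunk says
    let count = (peers.len() as f64).sqrt().round() as usize;
    peers.truncate(count);
    peers
}

fn main() {
    let peers: Vec<usize> = (0..16).collect();
    assert_eq!(select_random_lagging_peers(&peers, 42).len(), 4);
}
```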
@@ -1649,46 +1671,42 @@ impl ChainSync {
 	}
 	/// propagates latest block to lagging peers
-	fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[(PeerId, BlockNumber)]) -> usize {
+	fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[PeerId]) -> usize {
 		trace!(target: "sync", "Sending NewBlocks to {:?}", peers);
 		let mut sent = 0;
-		for &(peer_id, _) in peers {
+		for peer_id in peers {
 			if sealed.is_empty() {
 				let rlp = ChainSync::create_latest_block_rlp(io.chain());
-				self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
+				self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
 			} else {
 				for h in sealed {
 					let rlp = ChainSync::create_new_block_rlp(io.chain(), h);
-					self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
+					self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
 				}
 			}
-			self.peers.get_mut(&peer_id).unwrap().latest_hash = chain_info.best_block_hash.clone();
-			self.peers.get_mut(&peer_id).unwrap().latest_number = Some(chain_info.best_block_number);
+			if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+				peer.latest_hash = chain_info.best_block_hash.clone();
+			}
 			sent += 1;
 		}
 		sent
 	}
 	/// propagates new known hashes to all peers
-	fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[(PeerId, BlockNumber)]) -> usize {
+	fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize {
 		trace!(target: "sync", "Sending NewHashes to {:?}", peers);
 		let mut sent = 0;
-		let last_parent = HeaderView::new(&io.chain().block_header(BlockID::Hash(chain_info.best_block_hash.clone())).unwrap()).parent_hash();
-		for &(peer_id, peer_number) in peers {
-			let peer_best = if chain_info.best_block_number - peer_number > MAX_PEER_LAG_PROPAGATION as BlockNumber {
-				// If we think peer is too far behind just send one latest hash
-				last_parent.clone()
-			} else {
-				self.peers.get(&peer_id).unwrap().latest_hash.clone()
-			};
-			sent += match ChainSync::create_new_hashes_rlp(io.chain(), &peer_best, &chain_info.best_block_hash) {
+		let last_parent = HeaderView::new(&io.chain().block_header(BlockID::Hash(chain_info.best_block_hash.clone()))
+			.expect("Best block always exists")).parent_hash();
+		for peer_id in peers {
+			sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) {
 				Some(rlp) => {
 					{
-						let peer = self.peers.get_mut(&peer_id).unwrap();
-						peer.latest_hash = chain_info.best_block_hash.clone();
-						peer.latest_number = Some(chain_info.best_block_number);
+						if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
+							peer.latest_hash = chain_info.best_block_hash.clone();
+						}
 					}
-					self.send_packet(io, peer_id, NEW_BLOCK_HASHES_PACKET, rlp);
+					self.send_packet(io, *peer_id, NEW_BLOCK_HASHES_PACKET, rlp);
 					1
 				},
 				None => 0
@@ -2001,7 +2019,6 @@ mod tests {
 			genesis: H256::zero(),
 			network_id: U256::zero(),
 			latest_hash: peer_latest_hash,
-			latest_number: None,
 			difficulty: None,
 			asking: PeerAsking::Nothing,
 			asking_blocks: Vec::new(),

View File

@@ -64,3 +64,9 @@ pub use api::{EthSync, SyncProvider, SyncClient, NetworkManagerClient, ManageNet
 	ServiceConfiguration, NetworkConfiguration};
 pub use chain::{SyncStatus, SyncState};
 pub use network::{is_valid_node_url, NonReservedPeerMode, NetworkError};
+
+/// IPC interfaces
+#[cfg(feature="ipc")]
+pub mod remote {
+	pub use api::{SyncClient, NetworkManagerClient};
+}
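The new `remote` module only exists in builds compiled with the `ipc` feature; every other build sees no trace of the IPC re-exports. A minimal sketch of the gating pattern (the module contents here are illustrative):

```rust
mod api {
    pub fn sync_client() -> &'static str { "SyncClient" }
}

/// IPC interfaces, present only when built with `--features ipc`.
#[cfg(feature = "ipc")]
pub mod remote {
    pub use super::api::sync_client;
}

fn main() {
    #[cfg(feature = "ipc")]
    println!("ipc build: {}", remote::sync_client());
    #[cfg(not(feature = "ipc"))]
    println!("built without ipc: {} is internal only", api::sync_client());
}
```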

View File

@@ -95,6 +95,27 @@ fn forked() {
 	assert_eq!(&*net.peer(2).chain.numbers.read(), &peer1_chain);
 }
+
+#[test]
+fn forked_with_misbehaving_peer() {
+	::env_logger::init().ok();
+	let mut net = TestNet::new(3);
+	// peer 0 is on a totally different chain with higher total difficulty
+	net.peer_mut(0).chain = TestBlockChainClient::new_with_extra_data(b"fork".to_vec());
+	net.peer_mut(0).chain.add_blocks(500, EachBlockWith::Nothing);
+	net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Nothing);
+	net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Nothing);
+	net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Nothing);
+	net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle);
+	// peer 1 should sync to peer 2, others should not change
+	let peer0_chain = net.peer(0).chain.numbers.read().clone();
+	let peer2_chain = net.peer(2).chain.numbers.read().clone();
+	net.sync();
+	assert_eq!(&*net.peer(0).chain.numbers.read(), &peer0_chain);
+	assert_eq!(&*net.peer(1).chain.numbers.read(), &peer2_chain);
+	assert_eq!(&*net.peer(2).chain.numbers.read(), &peer2_chain);
+}
+
 #[test]
 fn net_hard_fork() {
 	::env_logger::init().ok();
@@ -116,11 +137,12 @@ fn net_hard_fork() {
 #[test]
 fn restart() {
+	::env_logger::init().ok();
 	let mut net = TestNet::new(3);
 	net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle);
 	net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle);
-	net.sync_steps(8);
+	net.sync();
 	// make sure that sync has actually happened
 	assert!(net.peer(0).chain.chain_info().best_block_number > 100);

View File

@@ -29,6 +29,7 @@ pub struct TestIo<'p> {
 	pub snapshot_service: &'p TestSnapshotService,
 	pub queue: &'p mut VecDeque<TestPacket>,
 	pub sender: Option<PeerId>,
+	pub to_disconnect: HashSet<PeerId>,
 }
 impl<'p> TestIo<'p> {
@@ -37,16 +38,19 @@ impl<'p> TestIo<'p> {
 			chain: chain,
 			snapshot_service: ss,
 			queue: queue,
-			sender: sender
+			sender: sender,
+			to_disconnect: HashSet::new(),
 		}
 	}
 }
 impl<'p> SyncIo for TestIo<'p> {
-	fn disable_peer(&mut self, _peer_id: PeerId) {
+	fn disable_peer(&mut self, peer_id: PeerId) {
+		self.disconnect_peer(peer_id);
 	}
-	fn disconnect_peer(&mut self, _peer_id: PeerId) {
+	fn disconnect_peer(&mut self, peer_id: PeerId) {
+		self.to_disconnect.insert(peer_id);
 	}
 	fn is_expired(&self) -> bool {
@@ -150,13 +154,30 @@ impl TestNet {
 	pub fn sync_step(&mut self) {
 		for peer in 0..self.peers.len() {
 			if let Some(packet) = self.peers[peer].queue.pop_front() {
+				let disconnecting = {
 					let mut p = self.peers.get_mut(packet.recipient).unwrap();
 					trace!("--- {} -> {} ---", peer, packet.recipient);
-					ChainSync::dispatch_packet(&p.sync, &mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data);
-					trace!("----------------");
+					let to_disconnect = {
+						let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
+						ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data);
+						io.to_disconnect
+					};
+					for d in &to_disconnect {
+						// notify this peer that the disconnecting peers are disconnecting
+						let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(*d));
+						p.sync.write().on_peer_aborting(&mut io, *d);
+					}
+					to_disconnect
+				};
-				let mut p = self.peers.get_mut(peer).unwrap();
-				p.sync.write().maintain_sync(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, None));
+				for d in &disconnecting {
+					// notify other peers that this peer is disconnecting
+					let mut p = self.peers.get_mut(*d).unwrap();
+					let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId));
+					p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
+				}
+			}
+			self.sync_step_peer(peer);
 		}
 	}

View File

@@ -4,7 +4,7 @@ homepage = "http://ethcore.io"
 repository = "https://github.com/ethcore/parity"
 license = "GPL-3.0"
 name = "ethcore-bigint"
-version = "0.1.0"
+version = "0.1.1"
 authors = ["Ethcore <admin@ethcore.io>"]
 build = "build.rs"

View File

@@ -68,6 +68,8 @@ mod panics;
 use mio::{EventLoop, Token};
 use std::fmt;
+
+pub use worker::LOCAL_STACK_SIZE;
 #[derive(Debug)]
 /// IO Error
 pub enum IoError {

View File

@@ -22,9 +22,19 @@ use crossbeam::sync::chase_lev;
 use service::{HandlerId, IoChannel, IoContext};
 use IoHandler;
 use panics::*;
+use std::cell::Cell;
 use std::sync::{Condvar as SCondvar, Mutex as SMutex};
+
+const STACK_SIZE: usize = 16*1024*1024;
+
+thread_local! {
+	/// Stack size of the current thread.
+	/// Must be updated if the default changes in Rust, since there is no way
+	/// to query or obtain it at runtime.
+	pub static LOCAL_STACK_SIZE: Cell<usize> = Cell::new(::std::env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok()).unwrap_or(2 * 1024 * 1024));
+}
+
 pub enum WorkType<Message> {
 	Readable,
 	Writable,
@@ -66,8 +76,9 @@ impl Worker {
 			deleting: deleting.clone(),
 			wait_mutex: wait_mutex.clone(),
 		};
-		worker.thread = Some(thread::Builder::new().name(format!("IO Worker #{}", index)).spawn(
+		worker.thread = Some(thread::Builder::new().stack_size(STACK_SIZE).name(format!("IO Worker #{}", index)).spawn(
 			move || {
+				LOCAL_STACK_SIZE.with(|val| val.set(STACK_SIZE));
 				panic_handler.catch_panic(move || {
 					Worker::work_loop(stealer, channel.clone(), wait, wait_mutex.clone(), deleting)
 				}).unwrap()
View File

@@ -591,7 +591,8 @@ impl Host {
 	}
 	fn handshake_count(&self) -> usize {
-		self.sessions.read().count() - self.session_count()
+		// session_count < total_count is possible because of the data race.
+		self.sessions.read().count().saturating_sub(self.session_count())
 	}
 	fn keep_alive(&self, io: &IoContext<NetworkIoMessage>) {
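`count() - session_count()` performs two independent reads; when they race, the subtraction underflows, which panics in debug builds and yields an absurd handshake count in release. `saturating_sub` clamps the difference at zero instead. A minimal sketch:

```rust
fn main() {
    let total_sessions: usize = 3;
    let established: usize = 5; // racing reads can briefly disagree
    // plain `total_sessions - established` would underflow here
    let handshakes = total_sessions.saturating_sub(established);
    assert_eq!(handshakes, 0);
}
```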