diff --git a/.travis.yml b/.travis.yml index 7213b8f09..6ae41379e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,11 +14,11 @@ matrix: - rust: nightly include: - rust: stable - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: beta - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: nightly - env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" cache: apt: true directories: @@ -33,10 +33,6 @@ addons: - libcurl4-openssl-dev - libelf-dev - libdw-dev -before_script: | - sudo add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && - sudo apt-get update && - sudo apt-get install -y --force-yes librocksdb script: - cargo build --release --verbose ${FEATURES} - cargo test --release --verbose ${FEATURES} ${TARGETS} @@ -51,6 +47,7 @@ after_success: | ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethcore-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethsync-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethcore_rpc-* && + ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethminer-* && ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${TRAVIS_JOB_ID} --exclude-pattern /usr/,/.cargo,/root/.multirust target/kcov target/debug/parity-* && [ $TRAVIS_BRANCH = master ] && [ $TRAVIS_PULL_REQUEST = false ] && diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..dfe37dbb4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,21 +1,24 @@ [root] name = "parity" -version = "0.9.99" +version = "1.1.0" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 
(registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore 0.9.99", - "ethcore-devtools 0.9.99", - "ethcore-rpc 0.9.99", - "ethcore-util 0.9.99", - "ethsync 0.9.99", + "ethcore 1.1.0", + "ethcore-devtools 1.1.0", + "ethcore-rpc 1.1.0", + "ethcore-util 1.1.0", + "ethminer 1.1.0", + "ethsync 1.1.0", "fdlimit 0.1.0", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rpassword 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -29,7 +32,7 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -41,14 +44,14 @@ name = "aster" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bigint" version = "0.1.0" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -63,7 +66,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -83,7 +86,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chrono" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", @@ -92,7 +95,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.44" +version = "0.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -115,7 +118,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -134,7 +137,7 @@ version = "1.1.1" source = "git+https://github.com/tomusdrw/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -143,7 +146,15 @@ name = "daemonize" version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "deque" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -151,7 +162,7 @@ name = "docopt" version = "0.6.78" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -167,7 +178,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -175,7 +186,7 @@ name = "eth-secp256k1" version = "0.5.4" source = "git+https://github.com/ethcore/rust-secp256k1#283a0677d8327536be58a87e0494d7e0e7b1d1d8" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -186,7 +197,7 @@ dependencies = [ [[package]] name = "ethash" -version = "0.9.99" +version = "1.1.0" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -195,14 +206,14 @@ dependencies = [ [[package]] name = "ethcore" -version = "0.9.99" +version = "1.1.0" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethash 0.9.99", - "ethcore-devtools 0.9.99", - "ethcore-util 0.9.99", + "ethash 1.1.0", + "ethcore-devtools 1.1.0", + "ethcore-util 1.1.0", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -214,22 +225,23 @@ dependencies = [ [[package]] name = "ethcore-devtools" -version = "0.9.99" +version = "1.1.0" dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ethcore-rpc" -version = "0.9.99" +version = "1.1.0" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "ethash 0.9.99", - "ethcore 0.9.99", - "ethcore-util 0.9.99", - "ethsync 0.9.99", - "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "ethash 1.1.0", + "ethcore 
1.1.0", + "ethcore-util 1.1.0", + "ethminer 1.1.0", + "ethsync 1.1.0", + "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -241,27 +253,27 @@ dependencies = [ [[package]] name = "ethcore-util" -version = "0.9.99" +version = "1.1.0" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", - "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", - "ethcore-devtools 0.9.99", + "ethcore-devtools 1.1.0", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "json-tests 0.1.0", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.3 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -275,17 +287,31 @@ dependencies = [ ] [[package]] -name = "ethsync" -version = "0.9.99" +name = "ethminer" +version = "1.1.0" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore 0.9.99", - "ethcore-util 0.9.99", + "ethcore 1.1.0", + "ethcore-util 1.1.0", + "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ethsync" +version = "1.1.0" +dependencies = [ + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore 
1.1.0", + "ethcore-util 1.1.0", + "ethminer 1.1.0", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -293,7 +319,7 @@ dependencies = [ name = "fdlimit" version = "0.1.0" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -303,7 +329,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "glob" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -317,7 +343,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -370,7 +396,7 @@ dependencies = [ "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -380,27 +406,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itertools" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "json-tests" version = "0.1.0" dependencies = [ - "glob 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jsonrpc-core" -version = "1.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -411,11 +437,11 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "2.1.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -450,16 +476,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.7" +version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.2.3" +source = "git+https://github.com/arkpar/rust-rocksdb.git#ebb602fc74b4067f9f51310bdc0401b8e59b7156" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -467,7 +492,7 @@ name = "log" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -480,7 +505,7 @@ name = "memchr" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -526,7 +551,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -550,7 +575,7 @@ dependencies = [ [[package]] name = "nom" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -567,7 +592,7 @@ name = "num_cpus" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -583,11 +608,6 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "pkg-config" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "primal" version = "0.2.3" @@ -634,7 +654,7 @@ name = "quasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -644,7 +664,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aster 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -652,17 +672,27 @@ name = "rand" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rayon" +version = "0.3.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex" -version = "0.1.54" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -671,13 +701,29 @@ name = "regex-syntax" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "regex-syntax" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rocksdb" -version = "0.4.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.4.3" +source = "git+https://github.com/arkpar/rust-rocksdb.git#ebb602fc74b4067f9f51310bdc0401b8e59b7156" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "librocksdb-sys 0.2.3 (git+https://github.com/arkpar/rust-rocksdb.git)", +] + +[[package]] +name = "rpassword" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -715,7 +761,7 @@ name = "semver" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nom 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -740,7 +786,7 @@ dependencies = [ "quasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "quasi_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -783,16 +829,16 @@ name = "syntex" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syntex_syntax" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -813,13 +859,21 @@ dependencies = [ "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "termios" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "time" version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -884,7 +938,7 @@ dependencies = [ [[package]] name = "url" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -941,7 +995,7 @@ name = "xml-rs" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..782dd1c79 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,13 @@ [package] description = "Ethcore client." name = "parity" -version = "0.9.99" +version = "1.1.0" license = "GPL-3.0" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -12,23 +16,29 @@ rustc-serialize = "0.3" docopt = "0.6" time = "0.1" ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } -clippy = { version = "0.0.44", optional = true } -ethcore-util = { path = "util" } -ethcore = { path = "ethcore" } -ethsync = { path = "sync" } -ethcore-rpc = { path = "rpc", optional = true } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" -ethcore-devtools = { path = "devtools" } number_prefix = "0.2" +rpassword = "0.1" +clippy = { version = "0.0.50", optional = true } +ethcore = { path = "ethcore" } +ethcore-util = { path = "util" } +ethsync = { path = "sync" } +ethminer = { path = "miner" } +ethcore-devtools = { path = "devtools" } +ethcore-rpc = { path = "rpc", optional = true } [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"] travis-beta = ["ethcore/json-tests"] travis-nightly = ["ethcore/json-tests", "dev"] [[bin]] path = "parity/main.rs" name = "parity" + +[profile.release] +debug = false +lto = false diff --git a/README.md b/README.md index 4fd2a53cc..47a27e30e 100644 --- a/README.md +++ b/README.md @@ -34,9 +34,6 @@ Then, download and build Parity: git clone https://github.com/ethcore/parity cd parity -# parity should be built with rust beta -multirust override beta - # build in release mode cargo build --release ``` diff --git a/build.rs b/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/cov.sh b/cov.sh index a1fa29e46..d60ef223d 100755 --- a/cov.sh +++ b/cov.sh @@ -15,12 +15,23 @@ if ! type kcov > /dev/null; then exit 1 fi -cargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --no-run || exit $? +cargo test \ + -p ethash \ + -p ethcore-util \ + -p ethcore \ + -p ethsync \ + -p ethcore-rpc \ + -p parity \ + -p ethminer \ + --no-run || exit $? rm -rf target/coverage mkdir -p target/coverage -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethash-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethsync-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* + +EXCLUDE="~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests" +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethash-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethsync-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethminer-* xdg-open target/coverage/index.html diff --git a/devtools/Cargo.toml b/devtools/Cargo.toml index ce0260936..19178fbfe 100644 --- a/devtools/Cargo.toml +++ b/devtools/Cargo.toml @@ -3,7 +3,7 @@ description = "Ethcore development/test/build tools" homepage = "http://ethcore.io" license = "GPL-3.0" name = 
"ethcore-devtools" -version = "0.9.99" +version = "1.1.0" authors = ["Ethcore "] [dependencies] diff --git a/doc.sh b/doc.sh index 2fd5ac20f..a5e5e2e13 100755 --- a/doc.sh +++ b/doc.sh @@ -1,4 +1,11 @@ #!/bin/sh # generate documentation only for partiy and ethcore libraries -cargo doc --no-deps --verbose -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity +cargo doc --no-deps --verbose \ + -p ethash \ + -p ethcore-util \ + -p ethcore \ + -p ethsync \ + -p ethcore-rpc \ + -p parity \ + -p ethminer diff --git a/docker/ubuntu-dev/Dockerfile b/docker/ubuntu-dev/Dockerfile index 8b016e6fd..05e8dfe8f 100644 --- a/docker/ubuntu-dev/Dockerfile +++ b/docker/ubuntu-dev/Dockerfile @@ -8,8 +8,8 @@ RUN apt-get update && \ # add-apt-repository software-properties-common \ curl \ - gcc \ - wget \ + g++ \ + wget \ git \ # evmjit dependencies zlib1g-dev \ @@ -18,9 +18,8 @@ RUN apt-get update && \ # cmake, llvm and rocksdb ppas. then update ppas RUN add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \ add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \ - add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && \ apt-get update && \ - apt-get install -y --force-yes cmake llvm-3.7-dev librocksdb + apt-get install -y --force-yes cmake llvm-3.7-dev # install evmjit RUN git clone https://github.com/debris/evmjit && \ @@ -31,9 +30,6 @@ RUN git clone https://github.com/debris/evmjit && \ # install multirust RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes -# install nightly and make it default -RUN multirust update nightly && multirust default nightly - # export rust LIBRARY_PATH ENV LIBRARY_PATH /usr/local/lib diff --git a/docker/ubuntu-jit/Dockerfile b/docker/ubuntu-jit/Dockerfile index 90ce531be..138882d2b 100644 --- a/docker/ubuntu-jit/Dockerfile +++ b/docker/ubuntu-jit/Dockerfile @@ -8,9 +8,9 @@ RUN apt-get update && \ # add-apt-repository software-properties-common \ curl \ - wget \ + wget \ git \ - gcc \ + g++ \ # evmjit dependencies zlib1g-dev \ libedit-dev @@ -18,9 +18,8 @@ RUN apt-get update && \ # cmake, llvm and rocksdb ppas. 
then update ppas RUN add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \ add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \ - add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && \ apt-get update && \ - apt-get install -y --force-yes cmake llvm-3.7-dev librocksdb + apt-get install -y --force-yes cmake llvm-3.7-dev # install evmjit RUN git clone https://github.com/debris/evmjit && \ @@ -31,9 +30,6 @@ RUN git clone https://github.com/debris/evmjit && \ # install multirust RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes -# install nightly and make it default -RUN multirust update nightly && multirust default nightly - # export rust LIBRARY_PATH ENV LIBRARY_PATH /usr/local/lib @@ -41,7 +37,6 @@ ENV LIBRARY_PATH /usr/local/lib ENV RUST_BACKTRACE 1 # build parity -# TODO: add jit feature RUN git clone https://github.com/ethcore/parity && \ cd parity && \ - cargo install --features rpc + cargo build --release --features ethcore/jit diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile index 812e66e9e..38c628d0e 100644 --- a/docker/ubuntu/Dockerfile +++ b/docker/ubuntu/Dockerfile @@ -3,23 +3,14 @@ FROM ubuntu:14.04 # install tools and dependencies RUN apt-get update && \ apt-get install -y \ - gcc \ + g++ \ curl \ git \ - # add-apt-repository - software-properties-common - -# rocksdb ppas. then update ppas -RUN add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && \ - apt-get update && \ - apt-get install -y --force-yes librocksdb + make # install multirust RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes -# install nightly and make it default -RUN multirust update nightly && multirust default nightly - # export rust LIBRARY_PATH ENV LIBRARY_PATH /usr/local/lib @@ -29,4 +20,4 @@ ENV RUST_BACKTRACE 1 # build parity RUN git clone https://github.com/ethcore/parity && \ cd parity && \ - cargo install --features rpc + cargo build --release diff --git a/ethash/Cargo.toml b/ethash/Cargo.toml index e2a2ec4d8..70d08249c 100644 --- a/ethash/Cargo.toml +++ b/ethash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethash" -version = "0.9.99" +version = "1.1.0" authors = ["arkpar "] [dependencies] @@ -17,7 +17,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" } num_cpus = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.50", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index e7f1b2bad..f95ec53a1 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -13,17 +13,14 @@ pub struct AccountDB<'db> { #[inline] fn combine_key<'a>(address: &'a H256, key: &'a H256) -> H256 { - let mut addr_hash = address.sha3(); - // preserve 96 bits of original key for db lookup - addr_hash[0..12].clone_from_slice(&[0u8; 12]); - &addr_hash ^ key + address ^ key } impl<'db> AccountDB<'db> { pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> { AccountDB { db: db, - address: x!(address.clone()), + address: x!(address), } } } @@ -70,7 +67,7 @@ impl<'db> AccountDBMut<'db> { pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> { AccountDBMut { db: db, - address: x!(address.clone()), + address: x!(address), } } @@ -100,6 +97,9 @@ impl<'db> HashDB 
for AccountDBMut<'db>{ } fn insert(&mut self, value: &[u8]) -> H256 { + if value == &NULL_RLP { + return SHA3_NULL_RLP.clone(); + } let k = value.sha3(); let ak = combine_key(&self.address, &k); self.db.emplace(ak, value.to_vec()); @@ -107,11 +107,17 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn emplace(&mut self, key: H256, value: Bytes) { + if key == SHA3_NULL_RLP { + return; + } let key = combine_key(&self.address, &key); self.db.emplace(key, value.to_vec()) } fn kill(&mut self, key: &H256) { + if key == &SHA3_NULL_RLP { + return; + } let key = combine_key(&self.address, key); self.db.kill(&key) } diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 68f647e37..1ef28188b 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -171,7 +171,7 @@ pub struct SealedBlock { impl<'x> OpenBlock<'x> { /// Create a new OpenBlock ready for transaction pushing. - pub fn new(engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self { + pub fn new(engine: &'x Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self { let mut r = OpenBlock { block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), engine: engine, @@ -185,7 +185,7 @@ impl<'x> OpenBlock<'x> { r.block.base.header.extra_data = extra_data; r.block.base.header.note_dirty(); - engine.populate_from_parent(&mut r.block.base.header, parent); + engine.populate_from_parent(&mut r.block.base.header, parent, gas_floor_target); engine.on_new_block(&mut r.block); r } @@ -265,7 +265,7 @@ impl<'x> OpenBlock<'x> { let mut s = self; s.engine.on_close_block(&mut s.block); s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|ref e| e.rlp_bytes().to_vec()).collect()); - let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append(&u.rlp(Seal::With)); s} ).out(); + let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); s.block.base.header.uncles_hash = uncle_bytes.sha3(); s.block.base.header.state_root = s.block.state.root().clone(); s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|ref r| r.rlp_bytes().to_vec()).collect()); @@ -274,7 +274,7 @@ impl<'x> OpenBlock<'x> { s.block.base.header.note_dirty(); ClosedBlock { - block: s.block, + block: s.block, uncle_bytes: uncle_bytes, } } @@ -317,7 +317,7 @@ impl ClosedBlock { } /// Drop this object and return the underlying database. - pub fn drain(self) -> JournalDB { self.block.state.drop().1 } + pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 } } impl SealedBlock { @@ -331,7 +331,7 @@ } /// Drop this object and return the underlying database. 
- pub fn drain(self) -> JournalDB { self.block.state.drop().1 } + pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 } } impl IsBlock for SealedBlock { @@ -339,15 +339,15 @@ } /// Enact the block given by block header, transactions and uncles -pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> { +pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> { { if ::log::max_log_level() >= ::log::LogLevel::Trace { - let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); + let s = State::from_existing(db.spawn(), parent.state_root().clone(), engine.account_start_nonce()); trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author())); } } - let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author().clone(), header.extra_data().clone()); + let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author().clone(), x!(3141562), header.extra_data().clone()); b.set_difficulty(*header.difficulty()); b.set_gas_limit(*header.gas_limit()); b.set_timestamp(header.timestamp()); @@ -357,20 +357,20 @@ } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> { +pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> { let block = BlockView::new(block_bytes); let header = block.header(); enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> { +pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> { let view = BlockView::new(&block.bytes); enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block afterwards -pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> { +pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> { let header = BlockView::new(block_bytes).header_view(); Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal()))) } @@ -389,9 +389,9 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; - let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]); let b = b.close(); let _ = b.seal(engine.deref(), vec![]); } @@ -404,14 +404,14 @@ let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); - let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap(); + engine.spec().ensure_db_good(db.as_hashdb_mut()); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]).close().seal(engine.deref(), vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, vec![genesis_header.hash()]).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); @@ -420,4 +420,9 @@ assert_eq!(orig_db.keys(), db.keys()); assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None); } + + #[test] + fn enact_block_with_uncle() { + // TODO: test for when there's an uncle. + } } diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 490a17995..042df1dc1 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -95,7 +95,7 @@ pub struct BlockQueue { panic_handler: Arc<PanicHandler>, engine: Arc<Box<Engine>>, more_to_verify: Arc<Condvar>, - verification: Arc<Mutex<Verification>>, + verification: Arc<Verification>, verifiers: Vec<JoinHandle<()>>, deleting: Arc<AtomicBool>, ready_signal: Arc<QueueSignal>, @@ -132,18 +132,23 @@ impl QueueSignal { } } -#[derive(Default)] struct Verification { - unverified: VecDeque<UnverifiedBlock>, - verified: VecDeque<PreverifiedBlock>, - verifying: VecDeque<VerifyingBlock>, - bad: HashSet<H256>, + // All locks must be captured in the order declared here. + unverified: Mutex<VecDeque<UnverifiedBlock>>, + verified: Mutex<VecDeque<PreverifiedBlock>>, + verifying: Mutex<VecDeque<VerifyingBlock>>, + bad: Mutex<HashSet<H256>>, } impl BlockQueue { /// Creates a new queue instance. 
pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue { - let verification = Arc::new(Mutex::new(Verification::default())); + let verification = Arc::new(Verification { + unverified: Mutex::new(VecDeque::new()), + verified: Mutex::new(VecDeque::new()), + verifying: Mutex::new(VecDeque::new()), + bad: Mutex::new(HashSet::new()), + }); let more_to_verify = Arc::new(Condvar::new()); let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel }); let deleting = Arc::new(AtomicBool::new(false)); @@ -186,17 +191,17 @@ } } - fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) { + fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) { while !deleting.load(AtomicOrdering::Acquire) { { - let mut lock = verification.lock().unwrap(); + let mut unverified = verification.unverified.lock().unwrap(); - if lock.unverified.is_empty() && lock.verifying.is_empty() { + if unverified.is_empty() && verification.verifying.lock().unwrap().is_empty() { empty.notify_all(); } - while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) { - lock = wait.wait(lock).unwrap(); + while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) { + unverified = wait.wait(unverified).unwrap(); } if deleting.load(AtomicOrdering::Acquire) { @@ -205,39 +210,42 @@ } let block = { - let mut v = verification.lock().unwrap(); - if v.unverified.is_empty() { + let mut unverified = verification.unverified.lock().unwrap(); + if unverified.is_empty() { continue; } - let block = v.unverified.pop_front().unwrap(); - v.verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None }); + let mut verifying = verification.verifying.lock().unwrap(); + let block = unverified.pop_front().unwrap(); + verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None }); block }; let block_hash = block.header.hash(); match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) { Ok(verified) => { - let mut v = verification.lock().unwrap(); - for e in &mut v.verifying { + let mut verifying = verification.verifying.lock().unwrap(); + for e in verifying.iter_mut() { if e.hash == block_hash { e.block = Some(verified); break; } } - if !v.verifying.is_empty() && v.verifying.front().unwrap().hash == block_hash { + if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash { // we're next! 
- let mut vref = v.deref_mut(); - BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad); + let mut verified = verification.verified.lock().unwrap(); + let mut bad = verification.bad.lock().unwrap(); + BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); ready.set(); } }, Err(err) => { - let mut v = verification.lock().unwrap(); + let mut verifying = verification.verifying.lock().unwrap(); + let mut verified = verification.verified.lock().unwrap(); + let mut bad = verification.bad.lock().unwrap(); warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); - v.bad.insert(block_hash.clone()); - v.verifying.retain(|e| e.hash != block_hash); - let mut vref = v.deref_mut(); - BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad); + bad.insert(block_hash.clone()); + verifying.retain(|e| e.hash != block_hash); + BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); ready.set(); } } @@ -257,19 +265,21 @@ impl BlockQueue { } /// Clear the queue and stop verification activity. - pub fn clear(&mut self) { - let mut verification = self.verification.lock().unwrap(); - verification.unverified.clear(); - verification.verifying.clear(); - verification.verified.clear(); + pub fn clear(&self) { + let mut unverified = self.verification.unverified.lock().unwrap(); + let mut verifying = self.verification.verifying.lock().unwrap(); + let mut verified = self.verification.verified.lock().unwrap(); + unverified.clear(); + verifying.clear(); + verified.clear(); self.processing.write().unwrap().clear(); } - /// Wait for queue to be empty - pub fn flush(&mut self) { - let mut verification = self.verification.lock().unwrap(); - while !verification.unverified.is_empty() || !verification.verifying.is_empty() { - verification = self.empty.wait(verification).unwrap(); + /// Wait for unverified queue to be empty + pub fn flush(&self) { + let mut unverified = self.verification.unverified.lock().unwrap(); + while !unverified.is_empty() || !self.verification.verifying.lock().unwrap().is_empty() { + unverified = self.empty.wait(unverified).unwrap(); } } @@ -278,27 +288,28 @@ impl BlockQueue { if self.processing.read().unwrap().contains(&hash) { return BlockStatus::Queued; } - if self.verification.lock().unwrap().bad.contains(&hash) { + if self.verification.bad.lock().unwrap().contains(&hash) { return BlockStatus::Bad; } BlockStatus::Unknown } /// Add a block to the queue. 
- pub fn import_block(&mut self, bytes: Bytes) -> ImportResult { + pub fn import_block(&self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); let h = header.hash(); - if self.processing.read().unwrap().contains(&h) { - return Err(x!(ImportError::AlreadyQueued)); - } { - let mut verification = self.verification.lock().unwrap(); - if verification.bad.contains(&h) { + if self.processing.read().unwrap().contains(&h) { + return Err(x!(ImportError::AlreadyQueued)); + } + + let mut bad = self.verification.bad.lock().unwrap(); + if bad.contains(&h) { return Err(x!(ImportError::KnownBad)); } - if verification.bad.contains(&header.parent_hash) { - verification.bad.insert(h.clone()); + if bad.contains(&header.parent_hash) { + bad.insert(h.clone()); return Err(x!(ImportError::KnownBad)); } } @@ -306,45 +317,50 @@ impl BlockQueue { match verify_block_basic(&header, &bytes, self.engine.deref().deref()) { Ok(()) => { self.processing.write().unwrap().insert(h.clone()); - self.verification.lock().unwrap().unverified.push_back(UnverifiedBlock { header: header, bytes: bytes }); + self.verification.unverified.lock().unwrap().push_back(UnverifiedBlock { header: header, bytes: bytes }); self.more_to_verify.notify_all(); Ok(h) }, Err(err) => { warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); - self.verification.lock().unwrap().bad.insert(h.clone()); + self.verification.bad.lock().unwrap().insert(h.clone()); Err(err) } } } /// Mark given block and all its children as bad. Stops verification. - pub fn mark_as_bad(&mut self, block_hashes: &[H256]) { - let mut verification_lock = self.verification.lock().unwrap(); + pub fn mark_as_bad(&self, block_hashes: &[H256]) { + if block_hashes.is_empty() { + return; + } + let mut verified_lock = self.verification.verified.lock().unwrap(); + let mut verified = verified_lock.deref_mut(); + let mut bad = self.verification.bad.lock().unwrap(); let mut processing = self.processing.write().unwrap(); - - let mut verification = verification_lock.deref_mut(); - - verification.bad.reserve(block_hashes.len()); + bad.reserve(block_hashes.len()); for hash in block_hashes { - verification.bad.insert(hash.clone()); + bad.insert(hash.clone()); processing.remove(&hash); } let mut new_verified = VecDeque::new(); - for block in verification.verified.drain(..) { - if verification.bad.contains(&block.header.parent_hash) { - verification.bad.insert(block.header.hash()); + for block in verified.drain(..) 
{ + if bad.contains(&block.header.parent_hash) { + bad.insert(block.header.hash()); processing.remove(&block.header.hash()); } else { new_verified.push_back(block); } } - verification.verified = new_verified; + *verified = new_verified; } /// Mark given block as processed - pub fn mark_as_good(&mut self, block_hashes: &[H256]) { + pub fn mark_as_good(&self, block_hashes: &[H256]) { + if block_hashes.is_empty() { + return; + } let mut processing = self.processing.write().unwrap(); for hash in block_hashes { processing.remove(&hash); @@ -352,16 +368,16 @@ } /// Removes up to `max` verified blocks from the queue - pub fn drain(&mut self, max: usize) -> Vec<PreverifiedBlock> { - let mut verification = self.verification.lock().unwrap(); - let count = min(max, verification.verified.len()); + pub fn drain(&self, max: usize) -> Vec<PreverifiedBlock> { + let mut verified = self.verification.verified.lock().unwrap(); + let count = min(max, verified.len()); let mut result = Vec::with_capacity(count); for _ in 0..count { - let block = verification.verified.pop_front().unwrap(); + let block = verified.pop_front().unwrap(); result.push(block); } self.ready_signal.reset(); - if !verification.verified.is_empty() { + if !verified.is_empty() { self.ready_signal.set(); } result @@ -369,28 +385,39 @@ } /// Get queue status. pub fn queue_info(&self) -> BlockQueueInfo { - let verification = self.verification.lock().unwrap(); + let (unverified_len, unverified_bytes) = { + let v = self.verification.unverified.lock().unwrap(); + (v.len(), v.heap_size_of_children()) + }; + let (verifying_len, verifying_bytes) = { + let v = self.verification.verifying.lock().unwrap(); + (v.len(), v.heap_size_of_children()) + }; + let (verified_len, verified_bytes) = { + let v = self.verification.verified.lock().unwrap(); + (v.len(), v.heap_size_of_children()) + }; BlockQueueInfo { - verified_queue_size: verification.verified.len(), - unverified_queue_size: verification.unverified.len(), - verifying_queue_size: verification.verifying.len(), + unverified_queue_size: unverified_len, + verifying_queue_size: verifying_len, + verified_queue_size: verified_len, max_queue_size: self.max_queue_size, max_mem_use: self.max_mem_use, mem_used: - verification.unverified.heap_size_of_children() - + verification.verifying.heap_size_of_children() - + verification.verified.heap_size_of_children(), + unverified_bytes + + verifying_bytes + + verified_bytes // TODO: https://github.com/servo/heapsize/pull/50 //+ self.processing.read().unwrap().heap_size_of_children(), } } - pub fn collect_garbage(&self) { + /// Optimise memory footprint of the heap fields. 
+ pub fn collect_garbage(&self) { { - let mut verification = self.verification.lock().unwrap(); - verification.unverified.shrink_to_fit(); - verification.verifying.shrink_to_fit(); - verification.verified.shrink_to_fit(); + self.verification.unverified.lock().unwrap().shrink_to_fit(); + self.verification.verifying.lock().unwrap().shrink_to_fit(); + self.verification.verified.lock().unwrap().shrink_to_fit(); } self.processing.write().unwrap().shrink_to_fit(); } @@ -438,7 +465,7 @@ #[test] fn can_import_blocks() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); if let Err(e) = queue.import_block(get_good_dummy_block()) { panic!("error importing block that is valid by definition({:?})", e); } } #[test] fn returns_error_for_duplicates() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); if let Err(e) = queue.import_block(get_good_dummy_block()) { panic!("error importing block that is valid by definition({:?})", e); } @@ -465,7 +492,7 @@ #[test] fn returns_ok_for_drained_duplicates() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); let block = get_good_dummy_block(); let hash = BlockView::new(&block).header().hash().clone(); if let Err(e) = queue.import_block(block) { @@ -482,7 +509,7 @@ #[test] fn returns_empty_once_finished() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition"); queue.flush(); queue.drain(1); @@ -496,7 +523,7 @@ let engine = spec.to_engine().unwrap(); let mut config = BlockQueueConfig::default(); config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 - let mut queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected()); + let queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected()); assert!(!queue.queue_info().is_full()); let mut blocks = get_good_dummy_block_seq(50); for b in blocks.drain(..) { diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index ce639bfed..cf16a8834 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -18,6 +18,7 @@ use util::numbers::{U256,H256}; use header::BlockNumber; /// Brief info about inserted block. +#[derive(Clone)] pub struct BlockInfo { /// Block hash. pub hash: H256, @@ -30,6 +31,7 @@ pub struct BlockInfo { } /// Describes location of newly inserted block. +#[derive(Clone)] pub enum BlockLocation { /// It's part of the canon chain. CanonChain, @@ -42,6 +44,8 @@ pub enum BlockLocation { /// Hash of the newest common ancestor with old canon chain. ancestor: H256, /// Hashes of the blocks between ancestor and this block. - route: Vec<H256> + enacted: Vec<H256>, + /// Hashes of the blocks which were invalidated. + retracted: Vec<H256>, } } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e529f50af..36db9dded 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -16,6 +16,7 @@ //! Blockchain database. 
+use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder}; use util::*; use header::*; use extras::*; @@ -28,7 +29,7 @@ use blockchain::best_block::BestBlock; use blockchain::bloom_indexer::BloomIndexer; use blockchain::tree_route::TreeRoute; use blockchain::update::ExtrasUpdate; -use blockchain::CacheSize; +use blockchain::{CacheSize, ImportRoute}; const BLOOM_INDEX_SIZE: usize = 16; const BLOOM_LEVELS: u8 = 3; @@ -134,8 +135,9 @@ struct CacheManager { /// /// **Does not do input data verification.** pub struct BlockChain { - pref_cache_size: usize, - max_cache_size: usize, + // All locks must be captured in the order declared here. + pref_cache_size: AtomicUsize, + max_cache_size: AtomicUsize, best_block: RwLock, @@ -157,6 +159,8 @@ pub struct BlockChain { // blooms indexing bloom_indexer: BloomIndexer, + + insert_lock: Mutex<()> } impl FilterDataSource for BlockChain { @@ -262,8 +266,8 @@ impl BlockChain { (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new())); let bc = BlockChain { - pref_cache_size: config.pref_cache_size, - max_cache_size: config.max_cache_size, + pref_cache_size: AtomicUsize::new(config.pref_cache_size), + max_cache_size: AtomicUsize::new(config.max_cache_size), best_block: RwLock::new(BestBlock::default()), blocks: RwLock::new(HashMap::new()), block_details: RwLock::new(HashMap::new()), @@ -275,7 +279,8 @@ impl BlockChain { extras_db: extras_db, blocks_db: blocks_db, cache_man: RwLock::new(cache_man), - bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS) + bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS), + insert_lock: Mutex::new(()), }; // load best block @@ -318,9 +323,9 @@ impl BlockChain { } /// Set the cache configuration. - pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) { - self.pref_cache_size = pref_cache_size; - self.max_cache_size = max_cache_size; + pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { + self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed); + self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed); } /// Returns a tree route between `from` and `to`, which is a tuple of: @@ -414,16 +419,17 @@ impl BlockChain { /// Inserts the block into backing cache database. /// Expects the block to be valid and already verified. /// If the block is already known, does nothing. - pub fn insert_block(&self, bytes: &[u8], receipts: Vec) { + pub fn insert_block(&self, bytes: &[u8], receipts: Vec) -> ImportRoute { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); let hash = header.sha3(); if self.is_known(&hash) { - return; + return ImportRoute::none(); } + let _lock = self.insert_lock.lock(); // store block in db self.blocks_db.put(&hash, &bytes).unwrap(); @@ -435,8 +441,10 @@ impl BlockChain { block_receipts: self.prepare_block_receipts_update(receipts, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info), - info: info + info: info.clone(), }); + + ImportRoute::from(info) } /// Applies extras update. 
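In the `BlockChain` changes above, `pref_cache_size` and `max_cache_size` become `AtomicUsize` so that `configure_cache` can take `&self`; the limits are advisory, which is why relaxed ordering suffices. A self-contained sketch of the same idea (illustrative names):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct CacheLimits {
    pref_cache_size: AtomicUsize,
    max_cache_size: AtomicUsize,
}

impl CacheLimits {
    // No `&mut self` needed: atomics can be stored through a shared reference.
    fn configure(&self, pref: usize, max: usize) {
        // Relaxed ordering: these are advisory limits read by the GC loop,
        // not synchronization points for other data.
        self.pref_cache_size.store(pref, Ordering::Relaxed);
        self.max_cache_size.store(max, Ordering::Relaxed);
    }

    fn should_collect(&self, in_use: usize) -> bool {
        in_use >= self.pref_cache_size.load(Ordering::Relaxed)
    }
}

fn main() {
    let limits = CacheLimits {
        pref_cache_size: AtomicUsize::new(16384),
        max_cache_size: AtomicUsize::new(262144),
    };
    limits.configure(32768, 524288);
    assert!(limits.should_collect(40000));
}
```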
@@ -444,48 +452,58 @@ impl BlockChain { let batch = DBTransaction::new(); batch.put(b"best", &update.info.hash).unwrap(); - // update best block - let mut best_block = self.best_block.write().unwrap(); - match update.info.location { - BlockLocation::Branch => (), - _ => { - *best_block = BestBlock { - hash: update.info.hash, - number: update.info.number, - total_difficulty: update.info.total_difficulty - }; + { + let mut write_details = self.block_details.write().unwrap(); + for (hash, details) in update.block_details.into_iter() { + batch.put_extras(&hash, &details); + self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone())); + write_details.insert(hash, details); } } - let mut write_hashes = self.block_hashes.write().unwrap(); - for (number, hash) in &update.block_hashes { - batch.put_extras(number, hash); - write_hashes.remove(number); + { + let mut write_receipts = self.block_receipts.write().unwrap(); + for (hash, receipt) in &update.block_receipts { + batch.put_extras(hash, receipt); + write_receipts.remove(hash); + } } - let mut write_details = self.block_details.write().unwrap(); - for (hash, details) in update.block_details.into_iter() { - batch.put_extras(&hash, &details); - write_details.insert(hash.clone(), details); - self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash)); + { + let mut write_blocks_blooms = self.blocks_blooms.write().unwrap(); + for (bloom_hash, blocks_bloom) in &update.blocks_blooms { + batch.put_extras(bloom_hash, blocks_bloom); + write_blocks_blooms.remove(bloom_hash); + } } - let mut write_receipts = self.block_receipts.write().unwrap(); - for (hash, receipt) in &update.block_receipts { - batch.put_extras(hash, receipt); - write_receipts.remove(hash); - } + // These cached values must be updated last and together + { + let mut best_block = self.best_block.write().unwrap(); + let mut write_hashes = self.block_hashes.write().unwrap(); + let mut write_txs = self.transaction_addresses.write().unwrap(); - let mut write_txs = self.transaction_addresses.write().unwrap(); - for (hash, tx_address) in &update.transactions_addresses { - batch.put_extras(hash, tx_address); - write_txs.remove(hash); - } + // update best block + match update.info.location { + BlockLocation::Branch => (), + _ => { + *best_block = BestBlock { + hash: update.info.hash, + number: update.info.number, + total_difficulty: update.info.total_difficulty + }; + } + } - let mut write_blocks_blooms = self.blocks_blooms.write().unwrap(); - for (bloom_hash, blocks_bloom) in &update.blocks_blooms { - batch.put_extras(bloom_hash, blocks_bloom); - write_blocks_blooms.remove(bloom_hash); + for (number, hash) in &update.block_hashes { + batch.put_extras(number, hash); + write_hashes.remove(number); + } + + for (hash, tx_address) in &update.transactions_addresses { + batch.put_extras(hash, tx_address); + write_txs.remove(hash); + } } // update extras database @@ -549,9 +567,14 @@ impl BlockChain { match route.blocks.len() { 0 => BlockLocation::CanonChain, - _ => BlockLocation::BranchBecomingCanonChain { - ancestor: route.ancestor, - route: route.blocks.into_iter().skip(route.index).collect() + _ => { + let retracted = route.blocks.iter().take(route.index).cloned().collect::<Vec<H256>>(); + + BlockLocation::BranchBecomingCanonChain { + ancestor: route.ancestor, + enacted: route.blocks.into_iter().skip(route.index).collect(), + retracted: retracted.into_iter().rev().collect(), + } } } } else { @@ -572,11 +595,11 @@ impl BlockChain { BlockLocation::CanonChain => { block_hashes.insert(number,
info.hash.clone()); }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. } => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; - for (index, hash) in route.iter().cloned().enumerate() { + for (index, hash) in enacted.iter().cloned().enumerate() { block_hashes.insert(start_number + index as BlockNumber, hash); } @@ -661,11 +684,11 @@ impl BlockChain { ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels()) .add_bloom(&header.log_bloom(), header.number() as usize) }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. } => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; - let mut blooms: Vec = route.iter() + let mut blooms: Vec = enacted.iter() .map(|hash| self.block(hash).unwrap()) .map(|bytes| BlockView::new(&bytes).header_view().log_bloom()) .collect(); @@ -774,11 +797,10 @@ impl BlockChain { /// Ticks our cache system and throws out any old data. pub fn collect_garbage(&self) { - if self.cache_size().total() < self.pref_cache_size { return; } + if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; } for _ in 0..COLLECTION_QUEUE_SIZE { { - let mut cache_man = self.cache_man.write().unwrap(); let mut blocks = self.blocks.write().unwrap(); let mut block_details = self.block_details.write().unwrap(); let mut block_hashes = self.block_hashes.write().unwrap(); @@ -786,6 +808,7 @@ impl BlockChain { let mut block_logs = self.block_logs.write().unwrap(); let mut blocks_blooms = self.blocks_blooms.write().unwrap(); let mut block_receipts = self.block_receipts.write().unwrap(); + let mut cache_man = self.cache_man.write().unwrap(); for id in cache_man.cache_usage.pop_back().unwrap().into_iter() { cache_man.in_use.remove(&id); @@ -812,7 +835,7 @@ impl BlockChain { blocks_blooms.shrink_to_fit(); block_receipts.shrink_to_fit(); } - if self.cache_size().total() < self.max_cache_size { break; } + if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; } } // TODO: m_lastCollection = chrono::system_clock::now(); @@ -825,7 +848,7 @@ mod tests { use rustc_serialize::hex::FromHex; use util::hash::*; use util::sha3::Hashable; - use blockchain::{BlockProvider, BlockChain, BlockChainConfig}; + use blockchain::{BlockProvider, BlockChain, BlockChainConfig, ImportRoute}; use tests::helpers::*; use devtools::*; use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; @@ -943,10 +966,30 @@ mod tests { let temp = RandomTempPath::new(); let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path()); - bc.insert_block(&b1, vec![]); - bc.insert_block(&b2, vec![]); - bc.insert_block(&b3a, vec![]); - bc.insert_block(&b3b, vec![]); + let ir1 = bc.insert_block(&b1, vec![]); + let ir2 = bc.insert_block(&b2, vec![]); + let ir3b = bc.insert_block(&b3b, vec![]); + let ir3a = bc.insert_block(&b3a, vec![]); + + assert_eq!(ir1, ImportRoute { + enacted: vec![b1_hash], + retracted: vec![], + }); + + assert_eq!(ir2, ImportRoute { + enacted: vec![b2_hash], + retracted: vec![], + }); + + assert_eq!(ir3b, ImportRoute { + enacted: vec![b3b_hash], + retracted: vec![], + }); + + assert_eq!(ir3a, ImportRoute { + enacted: vec![b3a_hash], + retracted: vec![b3b_hash], + }); assert_eq!(bc.best_block_hash(), 
best_block_hash); assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0); diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs index 51e6294fc..88c9577e2 100644 --- a/ethcore/src/blockchain/generator/generator.rs +++ b/ethcore/src/blockchain/generator/generator.rs @@ -29,7 +29,7 @@ pub trait ChainIterator: Iterator + Sized { /// Blocks generated by fork will have lower difficulty than current chain. fn fork(&self, fork_number: usize) -> Fork where Self: Clone; /// Should be called to make every consecutive block have given bloom. - fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self>; + fn with_bloom(&mut self, bloom: H2048) -> Bloom; /// Should be called to complete block. Without complete, block may have incorrect hash. fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>; /// Completes and generates block. @@ -44,7 +44,7 @@ impl ChainIterator for I where I: Iterator + Sized { } } - fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self> { + fn with_bloom(&mut self, bloom: H2048) -> Bloom { Bloom { iter: self, bloom: bloom diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs new file mode 100644 index 000000000..262b70899 --- /dev/null +++ b/ethcore/src/blockchain/import_route.rs @@ -0,0 +1,119 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Import route. + +use util::hash::H256; +use blockchain::block_info::{BlockInfo, BlockLocation}; + +/// Import route for newly inserted block. +#[derive(Debug, PartialEq)] +pub struct ImportRoute { + /// Blocks that were invalidated by new block. + pub retracted: Vec, + /// Blocks that were validated by new block. + pub enacted: Vec, +} + +impl ImportRoute { + pub fn none() -> Self { + ImportRoute { + retracted: vec![], + enacted: vec![], + } + } +} + +impl From for ImportRoute { + fn from(info: BlockInfo) -> ImportRoute { + match info.location { + BlockLocation::CanonChain => ImportRoute { + retracted: vec![], + enacted: vec![info.hash], + }, + BlockLocation::Branch => ImportRoute::none(), + BlockLocation::BranchBecomingCanonChain { mut enacted, retracted, .. 
} => { + enacted.push(info.hash); + ImportRoute { + retracted: retracted, + enacted: enacted, + } + } + } + } +} + +#[cfg(test)] +mod tests { + use util::hash::H256; + use util::numbers::U256; + use blockchain::block_info::{BlockInfo, BlockLocation}; + use blockchain::ImportRoute; + + #[test] + fn import_route_none() { + assert_eq!(ImportRoute::none(), ImportRoute { + enacted: vec![], + retracted: vec![], + }); + } + + #[test] + fn import_route_branch() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::Branch, + }; + + assert_eq!(ImportRoute::from(info), ImportRoute::none()); + } + + #[test] + fn import_route_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::CanonChain, + }; + + assert_eq!(ImportRoute::from(info), ImportRoute { + retracted: vec![], + enacted: vec![H256::from(U256::from(1))], + }); + } + + #[test] + fn import_route_branch_becoming_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(2)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::BranchBecomingCanonChain { + ancestor: H256::from(U256::from(0)), + enacted: vec![H256::from(U256::from(1))], + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + } + }; + + assert_eq!(ImportRoute::from(info), ImportRoute { + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], + }); + } +} diff --git a/ethcore/src/blockchain/mod.rs b/ethcore/src/blockchain/mod.rs index b0679b563..29a4ee684 100644 --- a/ethcore/src/blockchain/mod.rs +++ b/ethcore/src/blockchain/mod.rs @@ -23,9 +23,11 @@ mod bloom_indexer; mod cache; mod tree_route; mod update; +mod import_route; #[cfg(test)] mod generator; pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig}; pub use self::cache::CacheSize; pub use self::tree_route::TreeRoute; +pub use self::import_route::ImportRoute; diff --git a/ethcore/src/chainfilter/tests.rs b/ethcore/src/chainfilter/tests.rs index 08af44720..7dac29f11 100644 --- a/ethcore/src/chainfilter/tests.rs +++ b/ethcore/src/chainfilter/tests.rs @@ -28,9 +28,15 @@ pub struct MemoryCache { blooms: HashMap, } +impl Default for MemoryCache { + fn default() -> Self { + MemoryCache::new() + } +} + impl MemoryCache { /// Default constructor for MemoryCache - pub fn new() -> MemoryCache { + pub fn new() -> Self { MemoryCache { blooms: HashMap::new() } } diff --git a/ethcore/src/client.rs b/ethcore/src/client/client.rs similarity index 50% rename from ethcore/src/client.rs rename to ethcore/src/client/client.rs index 9688cc527..caa92db97 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client/client.rs @@ -17,10 +17,8 @@ //! Blockchain database client. 
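The `impl Default for MemoryCache` earlier in this hunk (and the one for `TestBlockChainClient` later in the diff) follows the shape clippy's `new_without_default` lint nudges toward: keep `new()` as the canonical constructor and let `default()` delegate to it. A tiny sketch of the pattern with a stand-in type:

```rust
use std::collections::HashMap;

struct MemoryCacheLike {
    blooms: HashMap<usize, u64>, // stand-in value type; the real cache stores blooms
}

impl MemoryCacheLike {
    fn new() -> Self {
        MemoryCacheLike { blooms: HashMap::new() }
    }
}

// Delegating keeps a single source of truth for construction.
impl Default for MemoryCacheLike {
    fn default() -> Self {
        MemoryCacheLike::new()
    }
}
```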
use std::marker::PhantomData; -use std::sync::atomic::AtomicBool; use util::*; use util::panics::*; -use blockchain::{BlockChain, BlockProvider}; use views::BlockView; use error::*; use header::{BlockNumber}; @@ -28,42 +26,18 @@ use state::State; use spec::Spec; use engine::Engine; use views::HeaderView; -use block_queue::BlockQueue; use service::{NetSyncMessage, SyncMessage}; use env_info::LastHashes; use verification::*; use block::*; -use transaction::LocalizedTransaction; +use transaction::{LocalizedTransaction, SignedTransaction}; use extras::TransactionAddress; use filter::Filter; use log_entry::LocalizedLogEntry; -use util::keys::store::SecretStore; -pub use block_queue::{BlockQueueConfig, BlockQueueInfo}; -pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize}; - -/// Uniquely identifies block. -#[derive(Debug, PartialEq, Clone)] -pub enum BlockId { - /// Block's sha3. - /// Querying by hash is always faster. - Hash(H256), - /// Block number within canon blockchain. - Number(BlockNumber), - /// Earliest block (genesis). - Earliest, - /// Latest mined block. - Latest -} - -/// Uniquely identifies transaction. -#[derive(Debug, PartialEq, Clone)] -pub enum TransactionId { - /// Transaction's sha3. - Hash(H256), - /// Block id and transaction index within this block. - /// Querying by block position is always faster. - Location(BlockId, usize) -} +use block_queue::{BlockQueue, BlockQueueInfo}; +use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; +use client::{BlockId, TransactionId, ClientConfig, BlockChainClient}; +pub use blockchain::CacheSize as BlockChainCacheSize; /// General block status #[derive(Debug, Eq, PartialEq)] @@ -78,27 +52,6 @@ pub enum BlockStatus { Unknown, } -/// Client configuration. Includes configs for all sub-systems. -#[derive(Debug)] -pub struct ClientConfig { - /// Block queue configuration. - pub queue: BlockQueueConfig, - /// Blockchain configuration. - pub blockchain: BlockChainConfig, - /// Prefer journal rather than archive. - pub prefer_journal: bool, -} - -impl Default for ClientConfig { - fn default() -> ClientConfig { - ClientConfig { - queue: Default::default(), - blockchain: Default::default(), - prefer_journal: false, - } - } -} - /// Information about the blockchain gathered together. #[derive(Debug)] pub struct BlockChainInfo { @@ -120,69 +73,8 @@ impl fmt::Display for BlockChainInfo { } } -/// Blockchain database client. Owns and manages a blockchain and a block queue. -pub trait BlockChainClient : Sync + Send { - /// Get raw block header data by block id. - fn block_header(&self, id: BlockId) -> Option; - - /// Get raw block body data by block id. - /// Block body is an RLP list of two items: uncles and transactions. - fn block_body(&self, id: BlockId) -> Option; - - /// Get raw block data by block header hash. - fn block(&self, id: BlockId) -> Option; - - /// Get block status by block header hash. - fn block_status(&self, id: BlockId) -> BlockStatus; - - /// Get block total difficulty. - fn block_total_difficulty(&self, id: BlockId) -> Option; - - /// Get block hash. - fn block_hash(&self, id: BlockId) -> Option; - - /// Get address code. - fn code(&self, address: &Address) -> Option; - - /// Get transaction with given hash. - fn transaction(&self, id: TransactionId) -> Option; - - /// Get a tree route between `from` and `to`. - /// See `BlockChain::tree_route`. 
- fn tree_route(&self, from: &H256, to: &H256) -> Option; - - /// Get latest state node - fn state_data(&self, hash: &H256) -> Option; - - /// Get raw block receipts data by block header hash. - fn block_receipts(&self, hash: &H256) -> Option; - - /// Import a block into the blockchain. - fn import_block(&self, bytes: Bytes) -> ImportResult; - - /// Get block queue information. - fn queue_info(&self) -> BlockQueueInfo; - - /// Clear block queue and abort all import activity. - fn clear_queue(&self); - - /// Get blockchain information. - fn chain_info(&self) -> BlockChainInfo; - - /// Get the best block header. - fn best_block_header(&self) -> Bytes { - self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap() - } - - /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option>; - - /// Returns logs matching given filter. - fn logs(&self, filter: Filter) -> Vec; -} - -#[derive(Default, Clone, Debug, Eq, PartialEq)] /// Report on the status of a client. +#[derive(Default, Clone, Debug, Eq, PartialEq)] pub struct ClientReport { /// How many blocks have been imported so far. pub blocks_imported: usize, @@ -206,25 +98,23 @@ impl ClientReport { /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. pub struct Client where V: Verifier { - chain: Arc>, + chain: Arc, engine: Arc>, - state_db: Mutex, - block_queue: RwLock, + state_db: Mutex>, + block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, panic_handler: Arc, - - // for sealing... - sealing_enabled: AtomicBool, - sealing_block: Mutex>, - author: RwLock
<Address>, - extra_data: RwLock<Bytes>, verifier: PhantomData<V>, - secret_store: Arc<RwLock<SecretStore>>, } -const HISTORY: u64 = 1000; -const CLIENT_DB_VER_STR: &'static str = "5.1"; +const HISTORY: u64 = 1200; +// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. +// Altering it will force a blanket DB update for *all* JournalDB-derived +// databases. +// Instead, add/upgrade the version string of the individual JournalDB-derived database +// of which you actually want to force an upgrade. +const CLIENT_DB_VER_STR: &'static str = "5.3"; impl Client<CanonVerifier> { /// Create a new client with given spec and DB path. @@ -239,16 +129,19 @@ impl<V> Client<V> where V: Verifier { let mut dir = path.to_path_buf(); dir.push(H64::from(spec.genesis_header().hash()).hex()); //TODO: sec/fat: pruned/full versioning - dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, if config.prefer_journal { "pruned" } else { "archive" })); + // version here is a bit useless now, since it's controlled only by the pruning algo. + dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, config.pruning)); let path = dir.as_path(); let gb = spec.genesis_block(); - let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path))); + let chain = Arc::new(BlockChain::new(config.blockchain, &gb, path)); let mut state_path = path.to_path_buf(); state_path.push("state"); let engine = Arc::new(try!(spec.to_engine())); - let mut state_db = JournalDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal); + let state_path_str = state_path.to_str().unwrap(); + let mut state_db = journaldb::new(state_path_str, config.pruning); + + if state_db.is_empty() && engine.spec().ensure_db_good(state_db.as_hashdb_mut()) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error committing genesis state to state DB"); } @@ -256,38 +149,29 @@ impl<V> Client<V> where V: Verifier { let panic_handler = PanicHandler::new_in_arc(); panic_handler.forward_from(&block_queue); - let secret_store = Arc::new(RwLock::new(SecretStore::new())); - secret_store.write().unwrap().try_import_existing(); - Ok(Arc::new(Client { chain: chain, engine: engine, state_db: Mutex::new(state_db), - block_queue: RwLock::new(block_queue), + block_queue: block_queue, report: RwLock::new(Default::default()), import_lock: Mutex::new(()), panic_handler: panic_handler, - sealing_enabled: AtomicBool::new(false), - sealing_block: Mutex::new(None), - author: RwLock::new(Address::new()), - extra_data: RwLock::new(Vec::new()), verifier: PhantomData, - secret_store: secret_store, })) } /// Flush the block import queue. pub fn flush_queue(&self) { - self.block_queue.write().unwrap().flush(); + self.block_queue.flush(); } fn build_last_hashes(&self, parent_hash: H256) -> LastHashes { let mut last_hashes = LastHashes::new(); last_hashes.resize(256, H256::new()); last_hashes[0] = parent_hash; - let chain = self.chain.read().unwrap(); for i in 0..255 { - match chain.block_details(&last_hashes[i]) { + match self.chain.block_details(&last_hashes[i]) { Some(details) => { last_hashes[i + 1] = details.parent.clone(); }, @@ -297,31 +181,26 @@ impl<V> Client<V> where V: Verifier { last_hashes } - /// Secret store (key manager) - pub fn secret_store(&self) -> &Arc<RwLock<SecretStore>> { - &self.secret_store - } - fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<ClosedBlock, ()> { let engine = self.engine.deref().deref(); let header = &block.header; // Check the block isn't so old we won't be able to enact it.
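Stepping back to the constructor change above: the pruning algorithm is now baked into the database directory name via `format!("v{}-sec-{}", CLIENT_DB_VER_STR, config.pruning)`, so each pruning mode gets its own on-disk tree and switching modes cannot silently reuse an incompatible database. A rough sketch of the idea, with a hypothetical stand-in for `journaldb::Algorithm`:

```rust
use std::fmt;

// Hypothetical stand-in for util::journaldb::Algorithm.
#[derive(Clone, Copy)]
enum Algorithm {
    Archive,
    EarlyMerge,
}

impl fmt::Display for Algorithm {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Algorithm::Archive => write!(f, "archive"),
            Algorithm::EarlyMerge => write!(f, "earlymerge"),
        }
    }
}

fn client_db_dir(ver: &str, pruning: Algorithm) -> String {
    // Mirrors the dir.push(format!(...)) call in the constructor above.
    format!("v{}-sec-{}", ver, pruning)
}

fn main() {
    assert_eq!(client_db_dir("5.3", Algorithm::Archive), "v5.3-sec-archive");
    assert_eq!(client_db_dir("5.3", Algorithm::EarlyMerge), "v5.3-sec-earlymerge");
}
```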
- let best_block_number = self.chain.read().unwrap().best_block_number(); + let best_block_number = self.chain.best_block_number(); if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY { warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); return Err(()); } // Verify Block Family - let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref()); + let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.deref()); if let Err(e) = verify_family_result { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); }; // Check if Parent is in chain - let chain_has_parent = self.chain.read().unwrap().block_header(&header.parent_hash); + let chain_has_parent = self.chain.block_header(&header.parent_hash); if let None = chain_has_parent { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); return Err(()); @@ -330,7 +209,7 @@ impl<V> Client<V> where V: Verifier { // Enact Verified Block let parent = chain_has_parent.unwrap(); let last_hashes = self.build_last_hashes(header.parent_hash.clone()); - let db = self.state_db.lock().unwrap().clone(); + let db = self.state_db.lock().unwrap().spawn(); let enact_result = enact_verified(&block, engine, db, &parent, last_hashes); if let Err(e) = enact_result { @@ -348,75 +227,111 @@ impl<V> Client<V> where V: Verifier { Ok(closed_block) } + fn calculate_enacted_retracted(&self, import_results: Vec<ImportRoute>) -> (Vec<H256>, Vec<H256>) { + fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> { + map.into_iter().map(|(k, _v)| k).collect() + } + + // In ImportRoute we get all the blocks that have been enacted and retracted by a single insert. + // Because we are doing multiple inserts, some of the blocks that were enacted in import `k`
This is why, to decide whether a block ends up enacted or retracted once all the inserts are done, + we iterate over all the routes; the final verdict for each block is whatever + remains in the hashmap at the end. + let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| { + for hash in route.enacted { + map.insert(hash, true); + } + for hash in route.retracted { + map.insert(hash, false); + } + map + }); + + // Split into enacted and retracted using the hashmap values + let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); + // And convert tuples to keys + (map_to_vec(enacted), map_to_vec(retracted)) + } + /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize { let max_blocks_to_import = 128; - let mut good_blocks = Vec::with_capacity(max_blocks_to_import); - let mut bad_blocks = HashSet::new(); + let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); + let mut invalid_blocks = HashSet::new(); + let mut import_results = Vec::with_capacity(max_blocks_to_import); let _import_lock = self.import_lock.lock(); - let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import); + let blocks = self.block_queue.drain(max_blocks_to_import); let original_best = self.chain_info().best_block_hash; for block in blocks { let header = &block.header; - if bad_blocks.contains(&header.parent_hash) { - bad_blocks.insert(header.hash()); + if invalid_blocks.contains(&header.parent_hash) { + invalid_blocks.insert(header.hash()); continue; } - let closed_block = self.check_and_close_block(&block); if let Err(_) = closed_block { - bad_blocks.insert(header.hash()); + invalid_blocks.insert(header.hash()); break; } + imported_blocks.push(header.hash()); - // Insert block - let closed_block = closed_block.unwrap(); - self.chain.write().unwrap().insert_block(&block.bytes, closed_block.block().receipts().clone()); - good_blocks.push(header.hash()); - + // Are we committing an era?
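Backing up to `calculate_enacted_retracted`, the fold is worth pinning down with a concrete case. Below is a standalone version (with `u32` standing in for `H256`): later routes overwrite earlier verdicts, so a block enacted by insert `k` and retracted by insert `k+1` ends up retracted.

```rust
use std::collections::HashMap;

// Stand-in for blockchain::ImportRoute.
struct Route { enacted: Vec<u32>, retracted: Vec<u32> }

fn enacted_retracted(routes: Vec<Route>) -> (Vec<u32>, Vec<u32>) {
    // Later inserts overwrite earlier verdicts for the same hash.
    let map = routes.into_iter().fold(HashMap::new(), |mut map, route| {
        for hash in route.enacted { map.insert(hash, true); }
        for hash in route.retracted { map.insert(hash, false); }
        map
    });
    let (enacted, retracted): (Vec<_>, Vec<_>) = map.into_iter().partition(|&(_, v)| v);
    (enacted.into_iter().map(|(k, _)| k).collect(),
     retracted.into_iter().map(|(k, _)| k).collect())
}

fn main() {
    let routes = vec![
        Route { enacted: vec![1, 2], retracted: vec![] },
        Route { enacted: vec![3], retracted: vec![2] }, // block 2 flips to retracted
    ];
    let (mut enacted, retracted) = enacted_retracted(routes);
    enacted.sort();
    assert_eq!(enacted, vec![1, 3]);
    assert_eq!(retracted, vec![2]);
}
```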
let ancient = if header.number() >= HISTORY { let n = header.number() - HISTORY; - let chain = self.chain.read().unwrap(); - Some((n, chain.block_hash(n).unwrap())) + Some((n, self.chain.block_hash(n).unwrap())) } else { None }; // Commit results + let closed_block = closed_block.unwrap(); + let receipts = closed_block.block().receipts().clone(); closed_block.drain() .commit(header.number(), &header.hash(), ancient) .expect("State DB commit failed."); + // And update the chain after commit to prevent race conditions + // (when something is in chain but you are not able to fetch details) + let route = self.chain.insert_block(&block.bytes, receipts); + import_results.push(route); + self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } - let imported = good_blocks.len(); - let bad_blocks = bad_blocks.into_iter().collect::>(); + let imported = imported_blocks.len(); + let invalid_blocks = invalid_blocks.into_iter().collect::>(); { - let mut block_queue = self.block_queue.write().unwrap(); - block_queue.mark_as_bad(&bad_blocks); - block_queue.mark_as_good(&good_blocks); + if !invalid_blocks.is_empty() { + self.block_queue.mark_as_bad(&invalid_blocks); + } + if !imported_blocks.is_empty() { + self.block_queue.mark_as_good(&imported_blocks); + } } { - let block_queue = self.block_queue.read().unwrap(); - if !good_blocks.is_empty() && block_queue.queue_info().is_empty() { + if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + let (enacted, retracted) = self.calculate_enacted_retracted(import_results); io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - good: good_blocks, - bad: bad_blocks, + imported: imported_blocks, + invalid: invalid_blocks, + enacted: enacted, + retracted: retracted, })).unwrap(); } } - if self.chain_info().best_block_hash != original_best && self.sealing_enabled.load(atomic::Ordering::Relaxed) { - self.prepare_sealing(); + { + if self.chain_info().best_block_hash != original_best { + io.send(NetworkIoMessage::User(SyncMessage::NewChainHead)).unwrap(); + } } imported @@ -424,12 +339,12 @@ impl Client where V: Verifier { /// Get a copy of the best block's state. pub fn state(&self) -> State { - State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) + State::from_existing(self.state_db.lock().unwrap().spawn(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) } /// Get info on the cache. pub fn blockchain_cache_info(&self) -> BlockChainCacheSize { - self.chain.read().unwrap().cache_size() + self.chain.cache_size() } /// Get the report. @@ -441,13 +356,13 @@ impl Client where V: Verifier { /// Tick the client. pub fn tick(&self) { - self.chain.read().unwrap().collect_garbage(); - self.block_queue.read().unwrap().collect_garbage(); + self.chain.collect_garbage(); + self.block_queue.collect_garbage(); } /// Set up the cache behaviour. 
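The notification shape also changes above: instead of `good`/`bad`, the client reports `imported`/`invalid` plus the net `enacted`/`retracted` sets, and a separate `NewChainHead` fires whenever the best block moves (replacing the direct `prepare_sealing` call that was removed). A rough sketch of the message shape, with the real `IoChannel`/`NetworkIoMessage` plumbing elided and an mpsc channel standing in:

```rust
use std::sync::mpsc;

type H256 = [u8; 32]; // stand-in for util's H256

// Variant shapes follow the diff; everything else here is illustrative.
enum SyncMessage {
    NewChainBlocks {
        imported: Vec<H256>,
        invalid: Vec<H256>,
        enacted: Vec<H256>,
        retracted: Vec<H256>,
    },
    NewChainHead,
}

fn notify_import(
    tx: &mpsc::Sender<SyncMessage>,
    imported: Vec<H256>,
    invalid: Vec<H256>,
    enacted: Vec<H256>,
    retracted: Vec<H256>,
    best_changed: bool,
) {
    tx.send(SyncMessage::NewChainBlocks { imported, invalid, enacted, retracted }).unwrap();
    if best_changed {
        // Consumers (e.g. a future miner service) can restart work on the new head.
        tx.send(SyncMessage::NewChainHead).unwrap();
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    notify_import(&tx, vec![[1u8; 32]], vec![], vec![[1u8; 32]], vec![], true);
    assert_eq!(rx.iter().take(2).count(), 2);
}
```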
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { - self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size); + self.chain.configure_cache(pref_cache_size, max_cache_size); } fn block_hash(chain: &BlockChain, id: BlockId) -> Option { @@ -462,99 +377,70 @@ impl Client where V: Verifier { fn block_number(&self, id: BlockId) -> Option { match id { BlockId::Number(number) => Some(number), - BlockId::Hash(ref hash) => self.chain.read().unwrap().block_number(hash), + BlockId::Hash(ref hash) => self.chain.block_number(hash), BlockId::Earliest => Some(0), - BlockId::Latest => Some(self.chain.read().unwrap().best_block_number()) - } - } - - /// Get the author that we will seal blocks as. - pub fn author(&self) -> Address { - self.author.read().unwrap().clone() - } - - /// Set the author that we will seal blocks as. - pub fn set_author(&self, author: Address) { - *self.author.write().unwrap() = author; - } - - /// Get the extra_data that we will seal blocks wuth. - pub fn extra_data(&self) -> Bytes { - self.extra_data.read().unwrap().clone() - } - - /// Set the extra_data that we will seal blocks with. - pub fn set_extra_data(&self, extra_data: Bytes) { - *self.extra_data.write().unwrap() = extra_data; - } - - /// New chain head event. Restart mining operation. - pub fn prepare_sealing(&self) { - let h = self.chain.read().unwrap().best_block_hash(); - let mut b = OpenBlock::new( - self.engine.deref().deref(), - self.state_db.lock().unwrap().clone(), - match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} }, - self.build_last_hashes(h.clone()), - self.author(), - self.extra_data() - ); - - self.chain.read().unwrap().find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); }); - - // TODO: push transactions. - - let b = b.close(); - trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number()); - *self.sealing_block.lock().unwrap() = Some(b); - } - - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - pub fn sealing_block(&self) -> &Mutex> { - if self.sealing_block.lock().unwrap().is_none() { - self.sealing_enabled.store(true, atomic::Ordering::Relaxed); - // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. - self.prepare_sealing(); - } - &self.sealing_block - } - - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - pub fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let mut maybe_b = self.sealing_block.lock().unwrap(); - match *maybe_b { - Some(ref b) if b.hash() == pow_hash => {} - _ => { return Err(Error::PowHashInvalid); } - } - - let b = maybe_b.take(); - match b.unwrap().try_seal(self.engine.deref().deref(), seal) { - Err(old) => { - *maybe_b = Some(old); - Err(Error::PowInvalid) - } - Ok(sealed) => { - // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. - try!(self.import_block(sealed.rlp_bytes())); - Ok(()) - } + BlockId::Latest => Some(self.chain.best_block_number()) } } } -// TODO: need MinerService MinerIoHandler - impl BlockChainClient for Client where V: Verifier { + // TODO [todr] Should be moved to miner crate eventually. 
+ fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { + block.try_seal(self.engine.deref().deref(), seal) + } + + // TODO [todr] Should be moved to miner crate eventually. + fn prepare_sealing(&self, author: Address, gas_floor_target: U256, extra_data: Bytes, transactions: Vec) -> Option { + let engine = self.engine.deref().deref(); + let h = self.chain.best_block_hash(); + + let mut b = OpenBlock::new( + engine, + self.state_db.lock().unwrap().spawn(), + match self.chain.block_header(&h) { Some(ref x) => x, None => {return None} }, + self.build_last_hashes(h.clone()), + author, + gas_floor_target, + extra_data, + ); + + // Add uncles + self.chain + .find_uncle_headers(&h, engine.maximum_uncle_age()) + .unwrap() + .into_iter() + .take(engine.maximum_uncle_count()) + .foreach(|h| { + b.push_uncle(h).unwrap(); + }); + + // Add transactions + let block_number = b.block().header().number(); + for tx in transactions { + let import = b.push_transaction(tx, None); + if let Err(e) = import { + trace!("Error adding transaction to block: number={}. Error: {:?}", block_number, e); + } + } + + // And close + let b = b.close(); + trace!("Sealing: number={}, hash={}, diff={}", + b.block().header().number(), + b.hash(), + b.block().header().difficulty() + ); + Some(b) + } + fn block_header(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) } fn block_body(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| { - chain.block(&hash).map(|bytes| { + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash).map(|bytes| { let rlp = Rlp::new(&bytes); let mut body = RlpStream::new_list(2); body.append_raw(rlp.at(1).as_raw(), 1); @@ -565,102 +451,109 @@ impl BlockChainClient for Client where V: Verifier { } fn block(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| { - chain.block(&hash) + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash) }) } fn block_status(&self, id: BlockId) -> BlockStatus { - let chain = self.chain.read().unwrap(); - match Self::block_hash(&chain, id) { - Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, - Some(hash) => self.block_queue.read().unwrap().block_status(&hash), + match Self::block_hash(&self.chain, id) { + Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, + Some(hash) => self.block_queue.block_status(&hash), None => BlockStatus::Unknown } } fn block_total_difficulty(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) + } + + fn nonce(&self, address: &Address) -> U256 { + self.state().nonce(address) } fn block_hash(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id) + Self::block_hash(&self.chain, id) } fn code(&self, address: &Address) -> Option { self.state().code(address) } + fn balance(&self, address: &Address) -> U256 { + self.state().balance(address) + } + + fn 
storage_at(&self, address: &Address, position: &H256) -> H256 { + self.state().storage_at(address, position) + } + fn transaction(&self, id: TransactionId) -> Option { - let chain = self.chain.read().unwrap(); match id { - TransactionId::Hash(ref hash) => chain.transaction_address(hash), - TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress { + TransactionId::Hash(ref hash) => self.chain.transaction_address(hash), + TransactionId::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { block_hash: hash, index: index }) - }.and_then(|address| chain.transaction(&address)) + }.and_then(|address| self.chain.transaction(&address)) } fn tree_route(&self, from: &H256, to: &H256) -> Option { - let chain = self.chain.read().unwrap(); - match chain.is_known(from) && chain.is_known(to) { - true => Some(chain.tree_route(from.clone(), to.clone())), + match self.chain.is_known(from) && self.chain.is_known(to) { + true => Some(self.chain.tree_route(from.clone(), to.clone())), false => None } } - fn state_data(&self, _hash: &H256) -> Option { - None + fn state_data(&self, hash: &H256) -> Option { + self.state_db.lock().unwrap().state(hash) } - fn block_receipts(&self, _hash: &H256) -> Option { - None + fn block_receipts(&self, hash: &H256) -> Option { + self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec()) } fn import_block(&self, bytes: Bytes) -> ImportResult { { let header = BlockView::new(&bytes).header_view(); - if self.chain.read().unwrap().is_known(&header.sha3()) { + if self.chain.is_known(&header.sha3()) { return Err(x!(ImportError::AlreadyInChain)); } if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown { return Err(x!(BlockError::UnknownParent(header.parent_hash()))); } } - self.block_queue.write().unwrap().import_block(bytes) + self.block_queue.import_block(bytes) } fn queue_info(&self) -> BlockQueueInfo { - self.block_queue.read().unwrap().queue_info() + self.block_queue.queue_info() } fn clear_queue(&self) { - self.block_queue.write().unwrap().clear(); + self.block_queue.clear(); } fn chain_info(&self) -> BlockChainInfo { - let chain = self.chain.read().unwrap(); BlockChainInfo { - total_difficulty: chain.best_block_total_difficulty(), - pending_total_difficulty: chain.best_block_total_difficulty(), - genesis_hash: chain.genesis_hash(), - best_block_hash: chain.best_block_hash(), - best_block_number: From::from(chain.best_block_number()) + total_difficulty: self.chain.best_block_total_difficulty(), + pending_total_difficulty: self.chain.best_block_total_difficulty(), + genesis_hash: self.chain.genesis_hash(), + best_block_hash: self.chain.best_block_hash(), + best_block_number: From::from(self.chain.best_block_number()) } } fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option> { match (self.block_number(from_block), self.block_number(to_block)) { - (Some(from), Some(to)) => Some(self.chain.read().unwrap().blocks_with_bloom(bloom, from, to)), + (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)), _ => None } } fn logs(&self, filter: Filter) -> Vec { + // TODO: lock blockchain only once + let mut blocks = filter.bloom_possibilities().iter() .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) .flat_map(|m| m) @@ -672,9 +565,9 @@ impl BlockChainClient for Client where V: Verifier { blocks.sort(); blocks.into_iter() - .filter_map(|number| 
self.chain.read().unwrap().block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| self.chain.read().unwrap().block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| self.chain.read().unwrap().block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) + .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes()))) .flat_map(|(number, hash, receipts, hashes)| { let mut log_index = 0; receipts.into_iter() diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs new file mode 100644 index 000000000..89e95ea06 --- /dev/null +++ b/ethcore/src/client/config.rs @@ -0,0 +1,32 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub use block_queue::BlockQueueConfig; +pub use blockchain::BlockChainConfig; +use util::journaldb; + +/// Client configuration. Includes configs for all sub-systems. +#[derive(Debug, Default)] +pub struct ClientConfig { + /// Block queue configuration. + pub queue: BlockQueueConfig, + /// Blockchain configuration. + pub blockchain: BlockChainConfig, + /// The JournalDB ("pruning") algorithm to use. + pub pruning: journaldb::Algorithm, + /// The name of the client instance. + pub name: String, +} diff --git a/ethcore/src/client/ids.rs b/ethcore/src/client/ids.rs new file mode 100644 index 000000000..303657a76 --- /dev/null +++ b/ethcore/src/client/ids.rs @@ -0,0 +1,44 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Unique identifiers. + +use util::hash::H256; +use header::BlockNumber; + +/// Uniquely identifies block. +#[derive(Debug, PartialEq, Clone)] +pub enum BlockId { + /// Block's sha3. + /// Querying by hash is always faster. + Hash(H256), + /// Block number within canon blockchain. + Number(BlockNumber), + /// Earliest block (genesis). + Earliest, + /// Latest mined block. + Latest +} + +/// Uniquely identifies transaction. +#[derive(Debug, PartialEq, Clone)] +pub enum TransactionId { + /// Transaction's sha3. 
+ Hash(H256), + /// Block id and transaction index within this block. + /// Querying by block position is always faster. + Location(BlockId, usize) +} diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs new file mode 100644 index 000000000..88e07d0b1 --- /dev/null +++ b/ethcore/src/client/mod.rs @@ -0,0 +1,120 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Blockchain database client. + +mod client; +mod config; +mod ids; +mod test_client; + +pub use self::client::*; +pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig}; +pub use self::ids::{BlockId, TransactionId}; +pub use self::test_client::{TestBlockChainClient, EachBlockWith}; + +use util::bytes::Bytes; +use util::hash::{Address, H256, H2048}; +use util::numbers::U256; +use blockchain::TreeRoute; +use block_queue::BlockQueueInfo; +use block::{ClosedBlock, SealedBlock}; +use header::BlockNumber; +use transaction::{LocalizedTransaction, SignedTransaction}; +use log_entry::LocalizedLogEntry; +use filter::Filter; +use error::{ImportResult}; + +/// Blockchain database client. Owns and manages a blockchain and a block queue. +pub trait BlockChainClient : Sync + Send { + /// Get raw block header data by block id. + fn block_header(&self, id: BlockId) -> Option; + + /// Get raw block body data by block id. + /// Block body is an RLP list of two items: uncles and transactions. + fn block_body(&self, id: BlockId) -> Option; + + /// Get raw block data by block header hash. + fn block(&self, id: BlockId) -> Option; + + /// Get block status by block header hash. + fn block_status(&self, id: BlockId) -> BlockStatus; + + /// Get block total difficulty. + fn block_total_difficulty(&self, id: BlockId) -> Option; + + /// Get address nonce. + fn nonce(&self, address: &Address) -> U256; + + /// Get block hash. + fn block_hash(&self, id: BlockId) -> Option; + + /// Get address code. + fn code(&self, address: &Address) -> Option; + + /// Get address balance. + fn balance(&self, address: &Address) -> U256; + + /// Get value of the storage at given position. + fn storage_at(&self, address: &Address, position: &H256) -> H256; + + /// Get transaction with given hash. + fn transaction(&self, id: TransactionId) -> Option; + + /// Get a tree route between `from` and `to`. + /// See `BlockChain::tree_route`. + fn tree_route(&self, from: &H256, to: &H256) -> Option; + + /// Get latest state node + fn state_data(&self, hash: &H256) -> Option; + + /// Get raw block receipts data by block header hash. + fn block_receipts(&self, hash: &H256) -> Option; + + /// Import a block into the blockchain. + fn import_block(&self, bytes: Bytes) -> ImportResult; + + /// Get block queue information. + fn queue_info(&self) -> BlockQueueInfo; + + /// Clear block queue and abort all import activity. + fn clear_queue(&self); + + /// Get blockchain information. 
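One motivation for moving the state accessors (`nonce`, `balance`, `storage_at`) into the `BlockChainClient` trait is that callers such as RPC handlers or a miner can be written once against the trait and exercised in tests with a stub like the `TestBlockChainClient` below. A hedged illustration with stand-in types (not the real `util::*` ones):

```rust
type Address = [u8; 20];
type U256 = u128; // stand-in for util's U256

trait StateClient {
    fn balance(&self, who: &Address) -> U256;
    fn nonce(&self, who: &Address) -> U256;
}

// Written once against the trait; works with any client implementation.
fn can_afford<C: StateClient>(client: &C, sender: &Address, cost: U256) -> bool {
    client.balance(sender) >= cost
}

struct StubClient;
impl StateClient for StubClient {
    fn balance(&self, _: &Address) -> U256 { 100 }
    fn nonce(&self, _: &Address) -> U256 { 0 }
}

fn main() {
    assert!(can_afford(&StubClient, &[0u8; 20], 42));
}
```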
+ fn chain_info(&self) -> BlockChainInfo; + + /// Get the best block header. + fn best_block_header(&self) -> Bytes { + // TODO: lock blockchain only once + self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap() + } + + /// Returns numbers of blocks containing given bloom. + fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option>; + + /// Returns logs matching given filter. + fn logs(&self, filter: Filter) -> Vec; + + // TODO [todr] Should be moved to miner crate eventually. + /// Returns ClosedBlock prepared for sealing. + fn prepare_sealing(&self, author: Address, gas_floor_target: U256, extra_data: Bytes, transactions: Vec) -> Option; + + // TODO [todr] Should be moved to miner crate eventually. + /// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error. + fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result; + +} + diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs new file mode 100644 index 000000000..83511b1cc --- /dev/null +++ b/ethcore/src/client/test_client.rs @@ -0,0 +1,376 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Test client. + +use util::*; +use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; +use blockchain::TreeRoute; +use client::{BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId}; +use header::{Header as BlockHeader, BlockNumber}; +use filter::Filter; +use log_entry::LocalizedLogEntry; +use receipt::Receipt; +use extras::BlockReceipts; +use error::{ImportResult}; + +use block_queue::BlockQueueInfo; +use block::{SealedBlock, ClosedBlock}; + +/// Test client. +pub struct TestBlockChainClient { + /// Blocks. + pub blocks: RwLock>, + /// Mapping of numbers to hashes. + pub numbers: RwLock>, + /// Genesis block hash. + pub genesis_hash: H256, + /// Last block hash. + pub last_hash: RwLock, + /// Difficulty. + pub difficulty: RwLock, + /// Balances. + pub balances: RwLock>, + /// Storage. + pub storage: RwLock>, + /// Code. + pub code: RwLock>, +} + +#[derive(Clone)] +/// Used for generating test client blocks. +pub enum EachBlockWith { + /// Plain block. + Nothing, + /// Block with an uncle. + Uncle, + /// Block with a transaction. + Transaction, + /// Block with an uncle and transaction. + UncleAndTransaction +} + +impl Default for TestBlockChainClient { + fn default() -> Self { + TestBlockChainClient::new() + } +} + +impl TestBlockChainClient { + /// Creates new test client. 
+ pub fn new() -> Self { + + let mut client = TestBlockChainClient { + blocks: RwLock::new(HashMap::new()), + numbers: RwLock::new(HashMap::new()), + genesis_hash: H256::new(), + last_hash: RwLock::new(H256::new()), + difficulty: RwLock::new(From::from(0)), + balances: RwLock::new(HashMap::new()), + storage: RwLock::new(HashMap::new()), + code: RwLock::new(HashMap::new()), + }; + client.add_blocks(1, EachBlockWith::Nothing); // add genesis block + client.genesis_hash = client.last_hash.read().unwrap().clone(); + client + } + + /// Set the balance of account `address` to `balance`. + pub fn set_balance(&self, address: Address, balance: U256) { + self.balances.write().unwrap().insert(address, balance); + } + + /// Set `code` at `address`. + pub fn set_code(&self, address: Address, code: Bytes) { + self.code.write().unwrap().insert(address, code); + } + + /// Set storage `position` to `value` for account `address`. + pub fn set_storage(&self, address: Address, position: H256, value: H256) { + self.storage.write().unwrap().insert((address, position), value); + } + + /// Add blocks to test client. + pub fn add_blocks(&self, count: usize, with: EachBlockWith) { + let len = self.numbers.read().unwrap().len(); + for n in len..(len + count) { + let mut header = BlockHeader::new(); + header.difficulty = From::from(n); + header.parent_hash = self.last_hash.read().unwrap().clone(); + header.number = n as BlockNumber; + let uncles = match with { + EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { + let mut uncles = RlpStream::new_list(1); + let mut uncle_header = BlockHeader::new(); + uncle_header.difficulty = From::from(n); + uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); + uncle_header.number = n as BlockNumber; + uncles.append(&uncle_header); + header.uncles_hash = uncles.as_raw().sha3(); + uncles + }, + _ => RlpStream::new_list(0) + }; + let txs = match with { + EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { + let mut txs = RlpStream::new_list(1); + let keypair = KeyPair::create().unwrap(); + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::one(), + nonce: U256::zero() + }; + let signed_tx = tx.sign(&keypair.secret()); + txs.append(&signed_tx); + txs.out() + }, + _ => rlp::NULL_RLP.to_vec() + }; + + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&txs, 1); + rlp.append_raw(uncles.as_raw(), 1); + self.import_block(rlp.as_raw().to_vec()).unwrap(); + } + } + + /// TODO: + pub fn corrupt_block(&mut self, n: BlockNumber) { + let hash = self.block_hash(BlockId::Number(n)).unwrap(); + let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); + header.parent_hash = H256::new(); + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&rlp::NULL_RLP, 1); + rlp.append_raw(&rlp::NULL_RLP, 1); + self.blocks.write().unwrap().insert(hash, rlp.out()); + } + + /// TODO: + pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { + let blocks_read = self.numbers.read().unwrap(); + let index = blocks_read.len() - delta; + blocks_read[&index].clone() + } + + fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(hash) => Some(hash), + BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(), + BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(), + BlockId::Latest => 
self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned() + } + } +} + +impl BlockChainClient for TestBlockChainClient { + fn block_total_difficulty(&self, _id: BlockId) -> Option { + Some(U256::zero()) + } + + fn block_hash(&self, _id: BlockId) -> Option { + unimplemented!(); + } + + fn nonce(&self, _address: &Address) -> U256 { + U256::zero() + } + + fn code(&self, address: &Address) -> Option { + self.code.read().unwrap().get(address).cloned() + } + + fn balance(&self, address: &Address) -> U256 { + self.balances.read().unwrap().get(address).cloned().unwrap_or_else(U256::zero) + } + + fn storage_at(&self, address: &Address, position: &H256) -> H256 { + self.storage.read().unwrap().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new) + } + + fn transaction(&self, _id: TransactionId) -> Option { + unimplemented!(); + } + + fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { + unimplemented!(); + } + + fn logs(&self, _filter: Filter) -> Vec { + unimplemented!(); + } + + fn prepare_sealing(&self, _author: Address, _gas_floor_target: U256, _extra_data: Bytes, _transactions: Vec) -> Option { + unimplemented!() + } + + fn try_seal(&self, _block: ClosedBlock, _seal: Vec) -> Result { + unimplemented!() + } + + fn block_header(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) + } + + fn block_body(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| { + let mut stream = RlpStream::new_list(2); + stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); + stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); + stream.out() + })) + } + + fn block(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned()) + } + + fn block_status(&self, id: BlockId) -> BlockStatus { + match id { + BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain, + BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain, + _ => BlockStatus::Unknown + } + } + + // works only if blocks are one after another 1 -> 2 -> 3 + fn tree_route(&self, from: &H256, to: &H256) -> Option { + Some(TreeRoute { + ancestor: H256::new(), + index: 0, + blocks: { + let numbers_read = self.numbers.read().unwrap(); + let mut adding = false; + + let mut blocks = Vec::new(); + for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { + if hash == to { + if adding { + blocks.push(hash.clone()); + } + adding = false; + break; + } + if hash == from { + adding = true; + } + if adding { + blocks.push(hash.clone()); + } + } + if adding { Vec::new() } else { blocks } + } + }) + } + + // TODO: returns just hashes instead of node state rlp(?) + fn state_data(&self, hash: &H256) -> Option { + // starts with 'f' ? + if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let mut rlp = RlpStream::new(); + rlp.append(&hash.clone()); + return Some(rlp.out()); + } + None + } + + fn block_receipts(&self, hash: &H256) -> Option { + // starts with 'f' ? 
+ if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
+ let receipt = BlockReceipts::new(vec![Receipt::new(
+ H256::zero(),
+ U256::zero(),
+ vec![])]);
+ let mut rlp = RlpStream::new();
+ rlp.append(&receipt);
+ return Some(rlp.out());
+ }
+ None
+ }
+
+ fn import_block(&self, b: Bytes) -> ImportResult {
+ let header = Rlp::new(&b).val_at::<BlockHeader>(0);
+ let h = header.hash();
+ let number: usize = header.number as usize;
+ if number > self.blocks.read().unwrap().len() {
+ panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number);
+ }
+ if number > 0 {
+ match self.blocks.read().unwrap().get(&header.parent_hash) {
+ Some(parent) => {
+ let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
+ if parent.number != (header.number - 1) {
+ panic!("Unexpected block parent");
+ }
+ },
+ None => {
+ panic!("Unknown block parent {:?} for block {}", header.parent_hash, number);
+ }
+ }
+ }
+ let len = self.numbers.read().unwrap().len();
+ if number == len {
+ {
+ let mut difficulty = self.difficulty.write().unwrap();
+ *difficulty.deref_mut() = *difficulty.deref() + header.difficulty;
+ }
+ mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone());
+ self.blocks.write().unwrap().insert(h.clone(), b);
+ self.numbers.write().unwrap().insert(number, h.clone());
+ let mut parent_hash = header.parent_hash;
+ if number > 0 {
+ let mut n = number - 1;
+ while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash {
+ *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone();
+ n -= 1;
+ parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
+ }
+ }
+ }
+ else {
+ self.blocks.write().unwrap().insert(h.clone(), b.to_vec());
+ }
+ Ok(h)
+ }
+
+ fn queue_info(&self) -> BlockQueueInfo {
+ BlockQueueInfo {
+ verified_queue_size: 0,
+ unverified_queue_size: 0,
+ verifying_queue_size: 0,
+ max_queue_size: 0,
+ max_mem_use: 0,
+ mem_used: 0,
+ }
+ }
+
+ fn clear_queue(&self) {
+ }
+
+ fn chain_info(&self) -> BlockChainInfo {
+ BlockChainInfo {
+ total_difficulty: *self.difficulty.read().unwrap(),
+ pending_total_difficulty: *self.difficulty.read().unwrap(),
+ genesis_hash: self.genesis_hash.clone(),
+ best_block_hash: self.last_hash.read().unwrap().clone(),
+ best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1,
+ }
+ }
+}
diff --git a/ethcore/src/engine.rs b/ethcore/src/engine.rs
index 83e1986fd..0b2ce8ae2 100644
--- a/ethcore/src/engine.rs
+++ b/ethcore/src/engine.rs
@@ -85,7 +85,7 @@ pub trait Engine : Sync + Send {
 /// Don't forget to call Super::populate_from_parent when subclassing & overriding.
 // TODO: consider including State in the params.
- fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
+ fn populate_from_parent(&self, header: &mut Header, parent: &Header, _gas_floor_target: U256) {
 header.difficulty = parent.difficulty;
 header.gas_limit = parent.gas_limit;
 header.note_dirty();
diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs
index 824d8da90..72127c754 100644
--- a/ethcore/src/error.rs
+++ b/ethcore/src/error.rs
@@ -63,8 +63,15 @@ pub enum ExecutionError {
 }
 
 #[derive(Debug)]
-/// Errors concerning transaction proessing.
+/// Errors concerning transaction processing.
 pub enum TransactionError {
+ /// Transaction's gas price is below threshold.
+ InsufficientGasPrice {
+ /// Minimal expected gas price
+ minimal: U256,
+ /// Transaction gas price
+ got: U256
+ },
 /// Transaction's gas limit (aka gas) is invalid.
 InvalidGasLimit(OutOfBounds<U256>),
 }
diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs
index f9810b964..406777251 100644
--- a/ethcore/src/ethereum/ethash.rs
+++ b/ethcore/src/ethereum/ethash.rs
@@ -92,10 +92,9 @@ impl Engine for Ethash {
 }
 }
 
- fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
+ fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256) {
 header.difficulty = self.calculate_difficuty(header, parent);
 header.gas_limit = {
- let gas_floor_target: U256 = x!(3141562);
 let gas_limit = parent.gas_limit;
 let bound_divisor = self.u256_param("gasLimitBoundDivisor");
 if gas_limit < gas_floor_target {
@@ -298,9 +297,9 @@ mod tests {
 let genesis_header = engine.spec().genesis_header();
 let mut db_result = get_temp_journal_db();
 let mut db = db_result.take();
- engine.spec().ensure_db_good(&mut db);
+ engine.spec().ensure_db_good(db.as_hashdb_mut());
 let last_hashes = vec![genesis_header.hash()];
- let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
+ let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
 let b = b.close();
 assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
 }
@@ -311,9 +310,9 @@ mod tests {
 let genesis_header = engine.spec().genesis_header();
 let mut db_result = get_temp_journal_db();
 let mut db = db_result.take();
- engine.spec().ensure_db_good(&mut db);
+ engine.spec().ensure_db_good(db.as_hashdb_mut());
 let last_hashes = vec![genesis_header.hash()];
- let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
+ let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]);
 let mut uncle = Header::new();
 let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
 uncle.author = uncle_author.clone();
diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs
index 0d1dcd8d5..8c2ae6b37 100644
--- a/ethcore/src/ethereum/mod.rs
+++ b/ethcore/src/ethereum/mod.rs
@@ -61,7 +61,7 @@ mod tests {
 let genesis_header = engine.spec().genesis_header();
 let mut db_result = get_temp_journal_db();
 let mut db = db_result.take();
- engine.spec().ensure_db_good(&mut db);
+ engine.spec().ensure_db_good(db.as_hashdb_mut());
 let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce());
 assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64));
 assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64));
diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs
index 9d4dd3bc4..445c0be41 100644
--- a/ethcore/src/evm/tests.rs
+++ b/ethcore/src/evm/tests.rs
@@ -25,9 +25,8 @@ struct FakeLogEntry {
 }
 
 #[derive(PartialEq, Eq, Hash, Debug)]
-#[cfg_attr(feature="dev", allow(enum_variant_names))] // Common prefix is C ;)
 enum FakeCallType {
- CALL, CREATE
+ Call, Create
 }
 
 #[derive(PartialEq, Eq, Hash, Debug)]
@@ -56,7 +55,7 @@ struct FakeExt {
 info: EnvInfo,
 schedule: Schedule,
 balances: HashMap<Address, U256>,
- calls: HashSet<FakeCall>
+ calls: HashSet<FakeCall>,
 }
 
 impl FakeExt {
@@ -94,7 +93,7 @@ impl Ext for FakeExt {
 
 fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult {
 self.calls.insert(FakeCall {
- call_type: FakeCallType::CREATE,
+ call_type: FakeCallType::Create,
 gas: *gas,
 sender_address: None,
 receive_address: None,
@@ -115,7 +114,7 @@ impl Ext for FakeExt {
 _output: &mut [u8]) -> MessageCallResult {
 
 self.calls.insert(FakeCall {
- call_type: FakeCallType::CALL,
+ call_type: FakeCallType::Call,
 gas: *gas,
 sender_address: Some(sender_address.clone()),
 receive_address: Some(receive_address.clone()),
@@ -347,7 +346,7 @@ fn test_log_empty(factory: super::Factory) {
 assert_eq!(gas_left, U256::from(99_619));
 assert_eq!(ext.logs.len(), 1);
 assert_eq!(ext.logs[0].topics.len(), 0);
- assert_eq!(ext.logs[0].data, vec![]);
+ assert!(ext.logs[0].data.is_empty());
 }
 
 evm_test!{test_log_sender: test_log_sender_jit, test_log_sender_int}
@@ -909,7 +908,7 @@ fn test_calls(factory: super::Factory) {
 };
 
 assert_set_contains(&ext.calls, &FakeCall {
- call_type: FakeCallType::CALL,
+ call_type: FakeCallType::Call,
 gas: U256::from(2556),
 sender_address: Some(address.clone()),
 receive_address: Some(code_address.clone()),
@@ -918,7 +917,7 @@ fn test_calls(factory: super::Factory) {
 code_address: Some(code_address.clone())
 });
 assert_set_contains(&ext.calls, &FakeCall {
- call_type: FakeCallType::CALL,
+ call_type: FakeCallType::Call,
 gas: U256::from(2556),
 sender_address: Some(address.clone()),
 receive_address: Some(address.clone()),
diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs
index beb8d62a1..d37bc20fb 100644
--- a/ethcore/src/externalities.rs
+++ b/ethcore/src/externalities.rs
@@ -226,9 +226,9 @@ impl<'a> Ext for Externalities<'a> {
 
 fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
 let address = self.origin_info.address.clone();
- self.substate.logs.push(LogEntry {
+ self.substate.logs.push(LogEntry {
 address: address,
- topics: topics,
+ topics: topics,
 data: data.to_vec()
 });
 }
@@ -301,8 +301,14 @@ mod tests {
 env_info: EnvInfo
 }
 
+ impl Default for TestSetup {
+ fn default() -> Self {
+ TestSetup::new()
+ }
+ }
+
 impl TestSetup {
- fn new() -> TestSetup {
+ fn new() -> Self {
 TestSetup {
 state: get_temp_state(),
 engine: get_test_spec().to_engine().unwrap(),
diff --git a/ethcore/src/extras.rs b/ethcore/src/extras.rs
index f4759b040..a7c82c37c 100644
--- a/ethcore/src/extras.rs
+++ b/ethcore/src/extras.rs
@@ -35,13 +35,13 @@ pub enum ExtrasIndex {
 BlocksBlooms = 4,
 /// Block receipts index
 BlockReceipts = 5,
-}
+}
 
 /// trait used to write Extras data to db
 pub trait ExtrasWritable {
 /// Write extra data to db
 fn put_extras<K, T>(&self, hash: &K, value: &T) where
- T: ExtrasIndexable + Encodable,
+ T: ExtrasIndexable + Encodable,
 K: ExtrasSliceConvertable;
 }
 
@@ -60,9 +60,9 @@ pub trait ExtrasReadable {
 impl ExtrasWritable for DBTransaction {
 fn put_extras<K, T>(&self, hash: &K, value: &T) where
- T: ExtrasIndexable + Encodable,
+ T: ExtrasIndexable + Encodable,
 K: ExtrasSliceConvertable {
-
+
 self.put(&hash.to_extras_slice(T::extras_index()), &encode(value)).unwrap()
 }
 }
@@ -215,6 +215,12 @@ pub struct BlocksBlooms {
 pub blooms: [H2048; 16],
 }
 
+impl Default for BlocksBlooms {
+ fn default() -> Self {
+ BlocksBlooms::new()
+ }
+}
+
 impl BlocksBlooms {
 pub fn new() -> Self {
 BlocksBlooms { blooms: unsafe { ::std::mem::zeroed() }}
diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs
index 938da02a0..572cda2fa 100644
--- a/ethcore/src/lib.rs
+++ b/ethcore/src/lib.rs
@@ -25,6 +25,8 @@
 #![cfg_attr(feature="dev", allow(match_bool))]
 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
 #![cfg_attr(feature="dev", allow(clone_on_copy))]
+// In most cases it expresses function flow better
+#![cfg_attr(feature="dev", allow(if_not_else))]
 
 //! Ethcore library
 //!
@@ -86,6 +88,7 @@ extern crate crossbeam;
 #[cfg(feature = "jit" )] extern crate evmjit;
 
 pub mod block;
+pub mod block_queue;
 pub mod client;
 pub mod error;
 pub mod ethereum;
@@ -119,7 +122,6 @@ mod substate;
 mod executive;
 mod externalities;
 mod verification;
-mod block_queue;
 mod blockchain;
 
 #[cfg(test)]
diff --git a/ethcore/src/log_entry.rs b/ethcore/src/log_entry.rs
index a75e6fcc1..63d09b4f0 100644
--- a/ethcore/src/log_entry.rs
+++ b/ethcore/src/log_entry.rs
@@ -111,7 +111,7 @@ mod tests {
 let bloom = H2048::from_str("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
 let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
 let log = LogEntry {
- address: address,
+ address: address,
 topics: vec![],
 data: vec![]
 };
diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs
index 756d02407..bcfe7724f 100644
--- a/ethcore/src/service.rs
+++ b/ethcore/src/service.rs
@@ -28,10 +28,16 @@ pub enum SyncMessage {
 /// New block has been imported into the blockchain
 NewChainBlocks {
 /// Hashes of blocks imported to blockchain
- good: Vec<H256>,
- /// Hashes of blocks not imported to blockchain
- bad: Vec<H256>,
+ imported: Vec<H256>,
+ /// Hashes of blocks not imported to blockchain (because they were invalid)
+ invalid: Vec<H256>,
+ /// Hashes of blocks that were removed from canonical chain
+ retracted: Vec<H256>,
+ /// Hashes of blocks that are now included in canonical chain
+ enacted: Vec<H256>,
 },
+ /// Best Block Hash in chain has been changed
+ NewChainHead,
 /// A block is ready
 BlockVerified,
 }
@@ -115,12 +121,11 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
 }
 }
 
- #[cfg_attr(feature="dev", allow(match_ref_pats))]
 #[cfg_attr(feature="dev", allow(single_match))]
 fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
- if let &UserMessage(ref message) = net_message {
- match message {
- &SyncMessage::BlockVerified => {
+ if let UserMessage(ref message) = *net_message {
+ match *message {
+ SyncMessage::BlockVerified => {
 self.client.import_verified_blocks(&io.channel());
 },
 _ => {}, // ignore other messages
diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs
index 38a0dda53..2208350cc 100644
--- a/ethcore/src/spec.rs
+++ b/ethcore/src/spec.rs
@@ -136,7 +136,7 @@ impl Spec {
 uncles_hash: RlpStream::new_list(0).out().sha3(),
 extra_data: self.extra_data.clone(),
 state_root: self.state_root().clone(),
- receipts_root: self.receipts_root.clone(),
+ receipts_root: self.receipts_root.clone(),
 log_bloom: H2048::new().clone(),
 gas_used: self.gas_used.clone(),
 gas_limit: self.gas_limit.clone(),
@@ -182,7 +182,7 @@ impl Spec {
 )
 }
 };
-
+
 self.parent_hash = H256::from_json(&genesis["parentHash"]);
 self.transactions_root = genesis.find("transactionsTrie").and_then(|_| Some(H256::from_json(&genesis["transactionsTrie"]))).unwrap_or(SHA3_NULL_RLP.clone());
 self.receipts_root = genesis.find("receiptTrie").and_then(|_| Some(H256::from_json(&genesis["receiptTrie"]))).unwrap_or(SHA3_NULL_RLP.clone());
@@ -249,7 +249,7 @@ impl FromJson for Spec {
 )
 }
 };
-
+
 Spec {
 name: json.find("name").map_or("unknown", |j| j.as_string().unwrap()).to_owned(),
 engine_name: json["engineName"].as_string().unwrap().to_owned(),
@@ -278,7 +278,7 @@ impl Spec {
 /// Ensure that the given state DB has the trie nodes in for the genesis state.
 pub fn ensure_db_good(&self, db: &mut HashDB) -> bool {
 if !db.contains(&self.state_root()) {
- let mut root = H256::new();
+ let mut root = H256::new();
 {
 let mut t = SecTrieDBMut::new(db, &mut root);
 for (address, account) in self.genesis_state.get().iter() {
diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs
index c13678c38..cb54654e6 100644
--- a/ethcore/src/state.rs
+++ b/ethcore/src/state.rs
@@ -31,7 +31,7 @@ pub type ApplyResult = Result<Receipt, Error>;
 
 /// Representation of the entire state of all accounts in the system.
 pub struct State {
- db: JournalDB,
+ db: Box<JournalDB>,
 root: H256,
 cache: RefCell<HashMap<Address, Option<Account>>>,
 snapshots: RefCell<Vec<HashMap<Address, Option<Option<Account>>>>>,
@@ -41,11 +41,11 @@ pub struct State {
 impl State {
 /// Creates new state with empty state root
 #[cfg(test)]
- pub fn new(mut db: JournalDB, account_start_nonce: U256) -> State {
+ pub fn new(mut db: Box<JournalDB>, account_start_nonce: U256) -> State {
 let mut root = H256::new();
 {
 // init trie and reset root too null
- let _ = SecTrieDBMut::new(&mut db, &mut root);
+ let _ = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
 }
 
 State {
@@ -58,10 +58,10 @@ impl State {
 }
 
 /// Creates new state with existing state root
- pub fn from_existing(db: JournalDB, root: H256, account_start_nonce: U256) -> State {
+ pub fn from_existing(db: Box<JournalDB>, root: H256, account_start_nonce: U256) -> State {
 {
 // trie should panic! if root does not exist
- let _ = SecTrieDB::new(&db, &root);
+ let _ = SecTrieDB::new(db.as_hashdb(), &root);
 }
 
 State {
@@ -126,7 +126,7 @@ impl State {
 }
 
 /// Destroy the current object and return root and database.
- pub fn drop(self) -> (H256, JournalDB) {
+ pub fn drop(self) -> (H256, Box<JournalDB>) {
 (self.root, self.db)
 }
 
@@ -148,7 +148,7 @@ impl State {
 
 /// Determine whether an account exists.
 pub fn exists(&self, a: &Address) -> bool {
- self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(&self.db, &self.root).contains(&a)
+ self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(self.db.as_hashdb(), &self.root).contains(&a)
 }
 
 /// Get the balance of account `a`.
@@ -163,7 +163,7 @@ impl State {
 
 /// Mutate storage of account `address` so that it is `value` for `key`.
 pub fn storage_at(&self, address: &Address, key: &H256) -> H256 {
- self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(&self.db, address), key))
+ self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(self.db.as_hashdb(), address), key))
 }
 
 /// Mutate storage of account `a` so that it is `value` for `key`.
@@ -253,7 +253,7 @@ impl State {
 /// Commits our cached account changes into the trie.
 pub fn commit(&mut self) {
 assert!(self.snapshots.borrow().is_empty());
- Self::commit_into(&mut self.db, &mut self.root, self.cache.borrow_mut().deref_mut());
+ Self::commit_into(self.db.as_hashdb_mut(), &mut self.root, self.cache.borrow_mut().deref_mut());
 }
 
 #[cfg(test)]
@@ -285,11 +285,11 @@ impl State {
 fn get<'a>(&'a self, a: &Address, require_code: bool) -> &'a Option<Account> {
 let have_key = self.cache.borrow().contains_key(a);
 if !have_key {
- self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp))
+ self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp))
 }
 if require_code {
 if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() {
- account.cache_code(&AccountDB::new(&self.db, a));
+ account.cache_code(&AccountDB::new(self.db.as_hashdb(), a));
 }
 }
 unsafe { ::std::mem::transmute(self.cache.borrow().get(a).unwrap()) }
@@ -305,7 +305,7 @@ impl State {
 fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> &'a mut Account {
 let have_key = self.cache.borrow().contains_key(a);
 if !have_key {
- self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp))
+ self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp))
 } else {
 self.note_cache(a);
 }
@@ -318,7 +318,7 @@ impl State {
 unsafe { ::std::mem::transmute(self.cache.borrow_mut().get_mut(a).unwrap().as_mut().map(|account| {
 if require_code {
- account.cache_code(&AccountDB::new(&self.db, a));
+ account.cache_code(&AccountDB::new(self.db.as_hashdb(), a));
 }
 account
 }).unwrap()) }
diff --git a/ethcore/src/substate.rs b/ethcore/src/substate.rs
index 374397ca7..57e35ad2e 100644
--- a/ethcore/src/substate.rs
+++ b/ethcore/src/substate.rs
@@ -31,6 +31,12 @@ pub struct Substate {
 pub contracts_created: Vec<Address>
 }
 
+impl Default for Substate {
+ fn default() -> Self {
+ Substate::new()
+ }
+}
+
 impl Substate {
 /// Creates new substate.
 pub fn new() -> Self {
@@ -67,8 +73,8 @@ mod tests {
 let mut sub_state = Substate::new();
 sub_state.contracts_created.push(address_from_u64(1u64));
 sub_state.logs.push(LogEntry {
- address: address_from_u64(1u64),
- topics: vec![],
+ address: address_from_u64(1u64),
+ topics: vec![],
 data: vec![]
 });
 sub_state.sstore_clears_count = x!(5);
@@ -77,8 +83,8 @@ mod tests {
 let mut sub_state_2 = Substate::new();
 sub_state_2.contracts_created.push(address_from_u64(2u64));
 sub_state_2.logs.push(LogEntry {
- address: address_from_u64(1u64),
- topics: vec![],
+ address: address_from_u64(1u64),
+ topics: vec![],
 data: vec![]
 });
 sub_state_2.sstore_clears_count = x!(7);
diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs
index 001d1729b..d9fae0527 100644
--- a/ethcore/src/tests/client.rs
+++ b/ethcore/src/tests/client.rs
@@ -35,6 +35,17 @@ fn imports_from_empty() {
 client.flush_queue();
 }
 
+#[test]
+fn returns_state_root_basic() {
+ let client_result = generate_dummy_client(6);
+ let client = client_result.reference();
+ let test_spec = get_test_spec();
+ let test_engine = test_spec.to_engine().unwrap();
+ let state_root = test_engine.spec().genesis_header().state_root;
+
+ assert!(client.state_data(&state_root).is_some());
+}
+
 #[test]
 fn imports_good_block() {
 let dir = RandomTempPath::new();
@@ -132,16 +143,9 @@ fn can_mine() {
 let dummy_blocks = get_good_dummy_block_seq(2);
 let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]);
 let client = client_result.reference();
- let b = client.sealing_block();
- let pow_hash = {
- let u = b.lock().unwrap();
- match *u {
- Some(ref b) => {
- assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3());
- b.hash()
- }
- None => { panic!(); }
- }
- };
- assert!(client.submit_seal(pow_hash, vec![]).is_ok());
+
+ let b = client.prepare_sealing(Address::default(), x!(31415926), vec![], vec![]).unwrap();
+
+ assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3());
+ assert!(client.try_seal(b, vec![]).is_ok());
 }
diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs
index bb9a44614..dc3068560 100644
--- a/ethcore/src/tests/helpers.rs
+++ b/ethcore/src/tests/helpers.rs
@@ -250,9 +250,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
 }
 }
 
-pub fn get_temp_journal_db() -> GuardedTempResult<JournalDB> {
+pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
 let temp = RandomTempPath::new();
- let journal_db = JournalDB::new(temp.as_str());
+ let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge);
 GuardedTempResult {
 _temp: temp,
 result: Some(journal_db)
@@ -268,8 +268,8 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
 }
 }
 
-pub fn get_temp_journal_db_in(path: &Path) -> JournalDB {
- JournalDB::new(path.to_str().unwrap())
+pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
+ journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge)
}
 
 pub fn get_temp_state_in(path: &Path) -> State {
diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs
index 260121989..fe1f406cc 100644
--- a/ethcore/src/verification/mod.rs
+++ b/ethcore/src/verification/mod.rs
@@ -17,9 +17,11 @@
 pub mod verification;
 pub mod verifier;
 mod canon_verifier;
+#[cfg(test)]
 mod noop_verifier;
 
 pub use self::verification::*;
 pub use self::verifier::Verifier;
 pub use self::canon_verifier::CanonVerifier;
+#[cfg(test)]
 pub use self::noop_verifier::NoopVerifier;
diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs
index ae2a153fe..20c15c3f1 100644
--- a/ethcore/src/verification/noop_verifier.rs
+++ b/ethcore/src/verification/noop_verifier.rs
@@ -20,6 +20,7 @@ use error::Error;
 use header::Header;
 use super::Verifier;
 
+#[allow(dead_code)]
 pub struct NoopVerifier;
 
 impl Verifier for NoopVerifier {
diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs
index ed3db3791..60cbed56c 100644
--- a/ethcore/src/verification/verification.rs
+++ b/ethcore/src/verification/verification.rs
@@ -255,8 +255,14 @@ mod tests {
 numbers: HashMap<BlockNumber, H256>,
 }
 
+ impl Default for TestBlockChain {
+ fn default() -> Self {
+ TestBlockChain::new()
+ }
+ }
+
 impl TestBlockChain {
- pub fn new() -> TestBlockChain {
+ pub fn new() -> Self {
 TestBlockChain {
 blocks: HashMap::new(),
 numbers: HashMap::new(),
diff --git a/ethcore/src/views.rs b/ethcore/src/views.rs
index 4a7ff054d..745cbff2c 100644
--- a/ethcore/src/views.rs
+++ b/ethcore/src/views.rs
@@ -223,6 +223,11 @@ impl<'a> BlockView<'a> {
 pub fn uncle_hashes(&self) -> Vec<H256> {
 self.rlp.at(2).iter().map(|rlp| rlp.as_raw().sha3()).collect()
 }
+
+ /// Return nth uncle.
+ pub fn uncle_at(&self, index: usize) -> Option<Header> {
+ self.rlp.at(2).iter().nth(index).map(|rlp| rlp.as_val())
+ }
 }
 
 impl<'a> Hashable for BlockView<'a> {
@@ -280,7 +285,7 @@ impl<'a> HeaderView<'a> {
 
 /// Returns block number.
 pub fn number(&self) -> BlockNumber { self.rlp.val_at(8) }
-
+
 /// Returns block gas limit.
 pub fn gas_limit(&self) -> U256 { self.rlp.val_at(9) }
 
diff --git a/evmjit/Cargo.toml b/evmjit/Cargo.toml
index 9449af82d..6586a360e 100644
--- a/evmjit/Cargo.toml
+++ b/evmjit/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "evmjit"
-version = "0.9.99"
+version = "1.1.0"
 authors = ["debris <marek.kotewicz@gmail.com>"]
 
 [lib]
diff --git a/hook.sh b/hook.sh
index 106ffe4f0..58bff20ab 100755
--- a/hook.sh
+++ b/hook.sh
@@ -1,3 +1,12 @@
 #!/bin/sh
-echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push
-chmod +x ./.git/hooks/pre-push
+FILE=./.git/hooks/pre-push
+echo "#!/bin/sh\n" > $FILE
+# Exit on any error
+echo "set -e" >> $FILE
+# Run release build
+echo "cargo build --release --features dev" >> $FILE
+# Build tests
+echo "cargo test --no-run --features dev \\" >> $FILE
+echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" >> $FILE
+echo "" >> $FILE
+chmod +x $FILE
diff --git a/miner/Cargo.toml b/miner/Cargo.toml
new file mode 100644
index 000000000..cd56aee9e
--- /dev/null
+++ b/miner/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+description = "Ethminer library"
+homepage = "http://ethcore.io"
+license = "GPL-3.0"
+name = "ethminer"
+version = "1.1.0"
+authors = ["Ethcore <admin@ethcore.io>"]
+build = "build.rs"
+
+[build-dependencies]
+rustc_version = "0.1"
+
+[dependencies]
+ethcore-util = { path = "../util" }
+ethcore = { path = "../ethcore" }
+log = "0.3"
+env_logger = "0.3"
+rustc-serialize = "0.3"
+rayon = "0.3.1"
+clippy = { version = "0.0.50", optional = true }
+
+[features]
+default = []
+dev = ["clippy"]
diff --git a/miner/build.rs b/miner/build.rs
new file mode 100644
index 000000000..41b9a1b3e
--- /dev/null
+++ b/miner/build.rs
@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+extern crate rustc_version;
+
+use rustc_version::{version_meta, Channel};
+
+fn main() {
+ if let Channel::Nightly = version_meta().channel {
+ println!("cargo:rustc-cfg=nightly");
+ }
+}
diff --git a/miner/src/lib.rs b/miner/src/lib.rs
new file mode 100644
index 000000000..a431bd44e
--- /dev/null
+++ b/miner/src/lib.rs
@@ -0,0 +1,111 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+#![warn(missing_docs)]
+#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
+#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
+
+//! Miner module
+//! Keeps track of transactions and the currently mined block.
+//!
+//! Usage example:
+//!
+//! ```rust
+//! extern crate ethcore_util as util;
+//! extern crate ethcore;
+//! extern crate ethminer;
+//! use std::ops::Deref;
+//! use std::env;
+//! use std::sync::Arc;
+//! use util::network::{NetworkService, NetworkConfiguration};
+//! use ethcore::client::{Client, ClientConfig, BlockChainClient};
+//! use ethcore::ethereum;
+//! use ethminer::{Miner, MinerService};
+//!
+//! fn main() {
+//! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap();
+//! let dir = env::temp_dir();
+//! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap();
+//!
+//! let miner: Miner = Miner::default();
+//! // get status
+//! assert_eq!(miner.status().transaction_queue_pending, 0);
+//!
+//! // Check block for sealing
+//! miner.prepare_sealing(client.deref());
+//! assert!(miner.sealing_block(client.deref()).lock().unwrap().is_some());
+//! }
+//! ```
+
+
+#[macro_use]
+extern crate log;
+#[macro_use]
+extern crate ethcore_util as util;
+extern crate ethcore;
+extern crate env_logger;
+extern crate rayon;
+
+mod miner;
+mod transaction_queue;
+
+pub use transaction_queue::TransactionQueue;
+pub use miner::{Miner};
+
+use std::sync::Mutex;
+use util::{H256, U256, Address, Bytes};
+use ethcore::client::{BlockChainClient};
+use ethcore::block::{ClosedBlock};
+use ethcore::error::{Error};
+use ethcore::transaction::SignedTransaction;
+
+/// Miner client API
+pub trait MinerService : Send + Sync {
+
+ /// Returns miner's status.
+ fn status(&self) -> MinerStatus;
+
+ /// Imports transactions to transaction queue.
+ fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_nonce: T) -> Result<(), Error>
+ where T: Fn(&Address) -> U256;
+
+ /// Returns hashes of transactions currently in pending
+ fn pending_transactions_hashes(&self) -> Vec<H256>;
+
+ /// Removes all transactions from the queue and restarts the mining operation.
+ fn clear_and_reset(&self, chain: &BlockChainClient);
+
+ /// Called when blocks are imported to chain, updates transactions queue.
+ fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]);
+
+ /// New chain head event. Restarts the mining operation.
+ fn prepare_sealing(&self, chain: &BlockChainClient);
+
+ /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
+ fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex<Option<ClosedBlock>>;
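+
+ // Intended call sequence (sketch; the `seal` layout is engine-specific, e.g.
+ // the RLP-encoded [mix_hash, nonce] fields for Ethash, and is illustrative here):
+ //
+ // let pow_hash = miner.sealing_block(chain).lock().unwrap().as_ref().unwrap().hash();
+ // miner.submit_seal(chain, pow_hash, seal_fields) // `seal_fields`: hypothetical Vec<Bytes>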
+
+ /// Submit `seal` as a valid solution for the header of `pow_hash`.
+ /// Will check the seal, but not actually insert the block into the chain.
+ fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
+}
+
+/// Mining status
+pub struct MinerStatus {
+ /// Number of transactions in queue with state `pending` (ready to be included in block)
+ pub transaction_queue_pending: usize,
+ /// Number of transactions in queue with state `future` (not yet ready to be included in block)
+ pub transaction_queue_future: usize,
+}
diff --git a/miner/src/miner.rs b/miner/src/miner.rs
new file mode 100644
index 000000000..6d5b3086e
--- /dev/null
+++ b/miner/src/miner.rs
@@ -0,0 +1,206 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use rayon::prelude::*;
+use std::sync::{Mutex, RwLock, Arc};
+use std::sync::atomic;
+use std::sync::atomic::AtomicBool;
+
+use util::{H256, U256, Address, Bytes, Uint};
+use ethcore::views::{BlockView};
+use ethcore::client::{BlockChainClient, BlockId};
+use ethcore::block::{ClosedBlock};
+use ethcore::error::{Error};
+use ethcore::transaction::SignedTransaction;
+use super::{MinerService, MinerStatus, TransactionQueue};
+
+/// Keeps track of transactions using priority queue and holds currently mined block.
+pub struct Miner {
+ transaction_queue: Mutex<TransactionQueue>,
+
+ // for sealing...
+ sealing_enabled: AtomicBool,
+ sealing_block: Mutex<Option<ClosedBlock>>,
+ gas_floor_target: RwLock<U256>,
+ author: RwLock<Address>,
+ extra_data: RwLock<Bytes>,
+
+}
+
+impl Default for Miner {
+ fn default() -> Miner {
+ Miner {
+ transaction_queue: Mutex::new(TransactionQueue::new()),
+ sealing_enabled: AtomicBool::new(false),
+ sealing_block: Mutex::new(None),
+ gas_floor_target: RwLock::new(U256::zero()),
+ author: RwLock::new(Address::default()),
+ extra_data: RwLock::new(Vec::new()),
+ }
+ }
+}
+
+impl Miner {
+ /// Creates new instance of miner
+ pub fn new() -> Arc<Miner> {
+ Arc::new(Miner::default())
+ }
+
+ /// Get the author that we will seal blocks as.
+ fn author(&self) -> Address {
+ *self.author.read().unwrap()
+ }
+
+ /// Get the extra_data that we will seal blocks with.
+ fn extra_data(&self) -> Bytes {
+ self.extra_data.read().unwrap().clone()
+ }
+
+ /// Get the gas floor target that we will aim for when sealing blocks.
+ fn gas_floor_target(&self) -> U256 {
+ self.gas_floor_target.read().unwrap().clone()
+ }
+
+ /// Set the author that we will seal blocks as.
+ pub fn set_author(&self, author: Address) {
+ *self.author.write().unwrap() = author;
+ }
+
+ /// Set the extra_data that we will seal blocks with.
+ pub fn set_extra_data(&self, extra_data: Bytes) {
+ *self.extra_data.write().unwrap() = extra_data;
+ }
+
+ /// Set the gas limit we wish to target when sealing a new block.
+ pub fn set_gas_floor_target(&self, target: U256) {
+ *self.gas_floor_target.write().unwrap() = target;
+ }
+
+ /// Set minimal gas price of transaction to be accepted for mining.
+ pub fn set_minimal_gas_price(&self, min_gas_price: U256) {
+ self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price);
+ }
+}
+
+impl MinerService for Miner {
+
+ fn clear_and_reset(&self, chain: &BlockChainClient) {
+ self.transaction_queue.lock().unwrap().clear();
+ self.prepare_sealing(chain);
+ }
+
+ fn status(&self) -> MinerStatus {
+ let status = self.transaction_queue.lock().unwrap().status();
+ MinerStatus {
+ transaction_queue_pending: status.pending,
+ transaction_queue_future: status.future,
+ }
+ }
+
+ fn import_transactions<T>(&self, transactions: Vec<SignedTransaction>, fetch_nonce: T) -> Result<(), Error>
+ where T: Fn(&Address) -> U256 {
+ let mut transaction_queue = self.transaction_queue.lock().unwrap();
+ transaction_queue.add_all(transactions, fetch_nonce)
+ }
+
+ fn pending_transactions_hashes(&self) -> Vec<H256> {
+ let transaction_queue = self.transaction_queue.lock().unwrap();
+ transaction_queue.pending_hashes()
+ }
+
+ fn prepare_sealing(&self, chain: &BlockChainClient) {
+ let no_of_transactions = 128;
+ // TODO: should select transactions from queue according to gas limit of block.
+ let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions);
+
+ let b = chain.prepare_sealing(
+ self.author(),
+ self.gas_floor_target(),
+ self.extra_data(),
+ transactions,
+ );
+ *self.sealing_block.lock().unwrap() = b;
+ }
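+
+ // Sketch of the knobs that feed `prepare_sealing` above (values illustrative only;
+ // all setters are defined in this file):
+ //
+ // let miner = Miner::new();
+ // miner.set_author(Address::default());
+ // miner.set_gas_floor_target(x!(3141562));
+ // miner.set_extra_data(b"Parity".to_vec());
+ // miner.prepare_sealing(client.deref()); // builds a ClosedBlock from top queue transactions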
+
+ fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex<Option<ClosedBlock>> {
+ if self.sealing_block.lock().unwrap().is_none() {
+ self.sealing_enabled.store(true, atomic::Ordering::Relaxed);
+ // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for.
+ self.prepare_sealing(chain);
+ }
+ &self.sealing_block
+ }
+
+ fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
+ let mut maybe_b = self.sealing_block.lock().unwrap();
+ match *maybe_b {
+ Some(ref b) if b.hash() == pow_hash => {}
+ _ => { return Err(Error::PowHashInvalid); }
+ }
+
+ let b = maybe_b.take();
+ match chain.try_seal(b.unwrap(), seal) {
+ Err(old) => {
+ *maybe_b = Some(old);
+ Err(Error::PowInvalid)
+ }
+ Ok(sealed) => {
+ // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice.
+ try!(chain.import_block(sealed.rlp_bytes()));
+ Ok(())
+ }
+ }
+ }
+
+ fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) {
+ fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
+ let block = chain
+ .block(BlockId::Hash(*hash))
+ // Client should send message after commit to db and inserting to chain.
+ .expect("Expected in-chain blocks.");
+ let block = BlockView::new(&block);
+ block.transactions()
+ }
+
+ {
+ let in_chain = vec![imported, enacted, invalid];
+ let in_chain = in_chain
+ .par_iter()
+ .flat_map(|h| h.par_iter().map(|h| fetch_transactions(chain, h)));
+ let out_of_chain = retracted
+ .par_iter()
+ .map(|h| fetch_transactions(chain, h));
+
+ in_chain.for_each(|txs| {
+ let mut transaction_queue = self.transaction_queue.lock().unwrap();
+ let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
+ transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
+ });
+ out_of_chain.for_each(|txs| {
+ // populate sender
+ for tx in &txs {
+ let _sender = tx.sender();
+ }
+ let mut transaction_queue = self.transaction_queue.lock().unwrap();
+ let _ = transaction_queue.add_all(txs, |a| chain.nonce(a));
+ });
+ }
+
+ if self.sealing_enabled.load(atomic::Ordering::Relaxed) {
+ self.prepare_sealing(chain);
+ }
+ }
+}
diff --git a/miner/src/transaction_queue.rs b/miner/src/transaction_queue.rs
new file mode 100644
index 000000000..880c73750
--- /dev/null
+++ b/miner/src/transaction_queue.rs
@@ -0,0 +1,1044 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+// TODO [todr] - own transactions should have higher priority
+
+//! Transaction Queue
+//!
+//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions
+//! and orders them by priority. Top priority transactions are those with low nonce height (difference between
+//! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used
+//! for comparison (higher gas price = higher priority).
+//!
+//! # Usage Example
+//!
+//! ```rust
+//! extern crate ethcore_util as util;
+//! extern crate ethcore;
+//! extern crate ethminer;
+//! extern crate rustc_serialize;
+//!
+//! use util::crypto::KeyPair;
+//! use util::hash::Address;
+//! use util::numbers::{Uint, U256};
+//! use ethminer::TransactionQueue;
+//! use ethcore::transaction::*;
+//! use rustc_serialize::hex::FromHex;
+//!
+//! fn main() {
+//! let key = KeyPair::create().unwrap();
+//! let t1 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(),
+//! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(10) };
+//! let t2 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(),
+//! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(11) };
+//!
+//! let st1 = t1.sign(&key.secret());
+//! let st2 = t2.sign(&key.secret());
+//! let default_nonce = |_a: &Address| U256::from(10);
+//!
+//! let mut txq = TransactionQueue::new();
+//! txq.add(st2.clone(), &default_nonce);
+//! txq.add(st1.clone(), &default_nonce);
+//!
+//! // Check status
+//! assert_eq!(txq.status().pending, 2);
+//! // Check top transactions
+//! let top = txq.top_transactions(3);
+//! assert_eq!(top.len(), 2);
+//! assert_eq!(top[0], st1);
+//! assert_eq!(top[1], st2);
+//!
+//! // And when a transaction is removed (but the nonce hasn't changed)
+//! // it will move invalid transactions to future
+//! txq.remove(&st1.hash(), &default_nonce);
+//! assert_eq!(txq.status().pending, 0);
+//! assert_eq!(txq.status().future, 1);
+//! assert_eq!(txq.top_transactions(3).len(), 0);
+//! }
+//! ```
+//!
+//! # Maintaining valid state
+//!
+//! 1. Whenever a transaction is imported to the queue, all other transactions from this sender are revalidated in `current`. It means that they are moved to `future` and back again (height recalculation & gap filling).
+//! 2. Whenever a transaction is removed:
+//! - When it's removed from `future` - all `future` transactions heights are recalculated and then
+//! we check if the transactions should go to `current` (comparing state nonce)
+//! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated.
+//!
+
+use std::default::Default;
+use std::cmp::{Ordering};
+use std::collections::{HashMap, BTreeSet};
+use util::numbers::{Uint, U256};
+use util::hash::{Address, H256};
+use util::table::*;
+use ethcore::transaction::*;
+use ethcore::error::{Error, TransactionError};
+
+
+#[derive(Clone, Debug)]
+/// Light structure used to identify transaction and its order
+struct TransactionOrder {
+ /// Primary ordering factor. Difference between transaction nonce and expected nonce in state
+ /// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5)
+ /// High nonce_height = Low priority (processed later)
+ nonce_height: U256,
+ /// Gas Price of the transaction.
+ /// Low gas price = Low priority (processed later)
+ gas_price: U256,
+ /// Hash to identify associated transaction
+ hash: H256,
+}
+
+
+impl TransactionOrder {
+ fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self {
+ TransactionOrder {
+ nonce_height: tx.nonce() - base_nonce,
+ gas_price: tx.transaction.gas_price,
+ hash: tx.hash(),
+ }
+ }
+
+ fn update_height(mut self, nonce: U256, base_nonce: U256) -> Self {
+ self.nonce_height = nonce - base_nonce;
+ self
+ }
+}
+
+impl Eq for TransactionOrder {}
+impl PartialEq for TransactionOrder {
+ fn eq(&self, other: &TransactionOrder) -> bool {
+ self.cmp(other) == Ordering::Equal
+ }
+}
+impl PartialOrd for TransactionOrder {
+ fn partial_cmp(&self, other: &TransactionOrder) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+impl Ord for TransactionOrder {
+ fn cmp(&self, b: &TransactionOrder) -> Ordering {
+ // First check nonce_height
+ if self.nonce_height != b.nonce_height {
+ return self.nonce_height.cmp(&b.nonce_height);
+ }
+
+ // Then compare gas_prices
+ let a_gas = self.gas_price;
+ let b_gas = b.gas_price;
+ if a_gas != b_gas {
+ return b_gas.cmp(&a_gas);
+ }
+
+ // Compare hashes
+ self.hash.cmp(&b.hash)
+ }
+}
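+
+// How the ordering plays out (sketch; nonces and gas prices illustrative only):
+// for a sender at state nonce 5, orders built from transactions with
+// (nonce 5, gas_price 2), (nonce 5, gas_price 1) and (nonce 6, gas_price 9) sort as
+//
+// (height 0, gp 2) < (height 0, gp 1) < (height 1, gp 9)
+//
+// i.e. a lower nonce height always comes first, within the same height the higher
+// gas price wins, and hashes only break exact ties.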
+
+/// Verified transaction (with sender)
+struct VerifiedTransaction {
+ transaction: SignedTransaction
+}
+impl VerifiedTransaction {
+ fn new(transaction: SignedTransaction) -> Result<Self, Error> {
+ try!(transaction.sender());
+ Ok(VerifiedTransaction {
+ transaction: transaction
+ })
+ }
+
+ fn hash(&self) -> H256 {
+ self.transaction.hash()
+ }
+
+ fn nonce(&self) -> U256 {
+ self.transaction.nonce
+ }
+
+ fn sender(&self) -> Address {
+ self.transaction.sender().unwrap()
+ }
+}
+
+/// Holds transactions accessible by (address, nonce) and by priority
+///
+/// TransactionSet keeps the number of entries below a limit, but this doesn't
+/// happen automatically during `insert/remove` operations.
+/// You have to call `enforce_limit` to remove the lowest priority transactions from the set.
+struct TransactionSet {
+ by_priority: BTreeSet<TransactionOrder>,
+ by_address: Table<Address, U256, TransactionOrder>,
+ limit: usize,
+}
+
+impl TransactionSet {
+ /// Inserts `TransactionOrder` to this set
+ fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option<TransactionOrder> {
+ self.by_priority.insert(order.clone());
+ self.by_address.insert(sender, nonce, order)
+ }
+
+ /// Remove low priority transactions if there are more than specified by the given `limit`.
+ ///
+ /// It drops transactions from this set but also removes associated `VerifiedTransaction`.
+ fn enforce_limit(&mut self, by_hash: &mut HashMap<H256, VerifiedTransaction>) {
+ let len = self.by_priority.len();
+ if len <= self.limit {
+ return;
+ }
+
+ let to_drop : Vec<(Address, U256)> = {
+ self.by_priority
+ .iter()
+ .skip(self.limit)
+ .map(|order| by_hash.get(&order.hash).expect("Inconsistency in queue detected."))
+ .map(|tx| (tx.sender(), tx.nonce()))
+ .collect()
+ };
+
+ for (sender, nonce) in to_drop {
+ let order = self.drop(&sender, &nonce).expect("Dropping transaction found in priority queue failed.");
+ by_hash.remove(&order.hash).expect("Inconsistency in queue.");
+ }
+ }
+
+ /// Drop transaction from this set (remove from `by_priority` and `by_address`)
+ fn drop(&mut self, sender: &Address, nonce: &U256) -> Option<TransactionOrder> {
+ if let Some(tx_order) = self.by_address.remove(sender, nonce) {
+ self.by_priority.remove(&tx_order);
+ return Some(tx_order);
+ }
+ None
+ }
+
+ /// Drop all transactions.
+ fn clear(&mut self) {
+ self.by_priority.clear();
+ self.by_address.clear();
+ }
+}
+
+// Will be used when rpc merged
+#[allow(dead_code)]
+#[derive(Debug)]
+/// Current status of the queue
+pub struct TransactionQueueStatus {
+ /// Number of pending transactions (ready to go to block)
+ pub pending: usize,
+ /// Number of future transactions (waiting for transactions with lower nonces first)
+ pub future: usize,
+}
+
+/// TransactionQueue implementation
+pub struct TransactionQueue {
+ /// Gas Price threshold for transactions that can be imported to this queue (defaults to 0)
+ minimal_gas_price: U256,
+ /// Priority queue for transactions that can go to block
+ current: TransactionSet,
+ /// Priority queue for transactions that have been received but are not yet valid to go to block
+ future: TransactionSet,
+ /// All transactions managed by queue indexed by hash
+ by_hash: HashMap<H256, VerifiedTransaction>,
+ /// Last nonce of transaction in current (to quickly check next expected transaction)
+ last_nonces: HashMap<Address, U256>,
+}
+
+impl Default for TransactionQueue {
+ fn default() -> Self {
+ TransactionQueue::new()
+ }
+}
+
+impl TransactionQueue {
+ /// Creates new instance of this Queue
+ pub fn new() -> Self {
+ Self::with_limits(1024, 1024)
+ }
+
+ /// Create new instance of this Queue with specified limits
+ pub fn with_limits(current_limit: usize, future_limit: usize) -> Self {
+ let current = TransactionSet {
+ by_priority: BTreeSet::new(),
+ by_address: Table::new(),
+ limit: current_limit,
+ };
+ let future = TransactionSet {
+ by_priority: BTreeSet::new(),
+ by_address: Table::new(),
+ limit: future_limit,
+ };
+
+ TransactionQueue {
+ minimal_gas_price: U256::zero(),
+ current: current,
+ future: future,
+ by_hash: HashMap::new(),
+ last_nonces: HashMap::new(),
+ }
+ }
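+
+ // Sketch of the limit semantics (cf. `should_drop_old_transactions_when_hitting_the_limit`
+ // in the tests below): with `with_limits(1, 1)`, importing a second, lower-priority
+ // transaction leaves the queue still holding exactly one pending transaction.
+ //
+ // let mut txq = TransactionQueue::with_limits(1, 1);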
+
+ /// Sets new gas price threshold for incoming transactions.
+ /// Any transactions already imported to the queue are not affected.
+ pub fn set_minimal_gas_price(&mut self, min_gas_price: U256) {
+ self.minimal_gas_price = min_gas_price;
+ }
+
+ // Will be used when rpc merged
+ #[allow(dead_code)]
+ /// Returns current status for this queue
+ pub fn status(&self) -> TransactionQueueStatus {
+ TransactionQueueStatus {
+ pending: self.current.by_priority.len(),
+ future: self.future.by_priority.len(),
+ }
+ }
+
+ /// Adds all signed transactions to queue to be verified and imported
+ pub fn add_all<T>(&mut self, txs: Vec<SignedTransaction>, fetch_nonce: T) -> Result<(), Error>
+ where T: Fn(&Address) -> U256 {
+ for tx in txs.into_iter() {
+ try!(self.add(tx, &fetch_nonce));
+ }
+ Ok(())
+ }
+
+ /// Add signed transaction to queue to be verified and imported
+ pub fn add<T>(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error>
+ where T: Fn(&Address) -> U256 {
+
+ if tx.gas_price < self.minimal_gas_price {
+ trace!(target: "sync",
+ "Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})",
+ tx.hash(), tx.gas_price, self.minimal_gas_price
+ );
+
+ return Err(Error::Transaction(TransactionError::InsufficientGasPrice{
+ minimal: self.minimal_gas_price,
+ got: tx.gas_price
+ }));
+ }
+
+ self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce);
+ Ok(())
+ }
+
+ /// Removes all transactions identified by hashes given in slice
+ ///
+ /// If a gap is introduced, marks subsequent transactions as future
+ pub fn remove_all<T>(&mut self, transaction_hashes: &[H256], fetch_nonce: T)
+ where T: Fn(&Address) -> U256 {
+ for hash in transaction_hashes {
+ self.remove(&hash, &fetch_nonce);
+ }
+ }
+
+ /// Removes transaction identified by the given hash from the queue.
+ ///
+ /// If a gap is introduced, marks subsequent transactions as future
+ pub fn remove<T>(&mut self, transaction_hash: &H256, fetch_nonce: &T)
+ where T: Fn(&Address) -> U256 {
+ let transaction = self.by_hash.remove(transaction_hash);
+ if transaction.is_none() {
+ // We don't know this transaction
+ return;
+ }
+
+ let transaction = transaction.unwrap();
+ let sender = transaction.sender();
+ let nonce = transaction.nonce();
+ let current_nonce = fetch_nonce(&sender);
+
+ // Remove from future
+ let order = self.future.drop(&sender, &nonce);
+ if order.is_some() {
+ self.update_future(&sender, current_nonce);
+ // And now let's check if there is some chain of transactions in future
+ // that should be placed in current
+ self.move_matching_future_to_current(sender, current_nonce, current_nonce);
+ return;
+ }
+
+ // Remove from current
+ let order = self.current.drop(&sender, &nonce);
+ if order.is_some() {
+ // We will either move transaction to future or remove it completely
+ // so there will be no transactions from this sender in current
+ self.last_nonces.remove(&sender);
+ // First update height of transactions in future to avoid collisions
+ self.update_future(&sender, current_nonce);
+ // This should move all current transactions to future and remove old transactions
+ self.move_all_to_future(&sender, current_nonce);
+ // And now let's check if there is some chain of transactions in future
+ // that should be placed in current. It should also update last_nonces.
+ self.move_matching_future_to_current(sender, current_nonce, current_nonce);
+ return;
+ }
+ }
+
+ /// Update height of all transactions in future transactions set.
+ fn update_future(&mut self, sender: &Address, current_nonce: U256) {
+ // We need to drain all transactions for current sender from future and reinsert them with updated height
+ let all_nonces_from_sender = match self.future.by_address.row(&sender) {
+ Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
+ None => vec![],
+ };
+ for k in all_nonces_from_sender {
+ let order = self.future.drop(&sender, &k).unwrap();
+ if k >= current_nonce {
+ self.future.insert(*sender, k, order.update_height(k, current_nonce));
+ } else {
+ // Remove the transaction completely
+ self.by_hash.remove(&order.hash);
+ }
+ }
+ }
+
+ /// Drop all transactions from given sender from `current`.
+ /// Either moves them to `future` or removes them from queue completely.
+ fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) {
+ let all_nonces_from_sender = match self.current.by_address.row(&sender) {
+ Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
+ None => vec![],
+ };
+
+ for k in all_nonces_from_sender {
+ // Goes to future or is removed
+ let order = self.current.drop(&sender, &k).unwrap();
+ if k >= current_nonce {
+ self.future.insert(*sender, k, order.update_height(k, current_nonce));
+ } else {
+ self.by_hash.remove(&order.hash);
+ }
+ }
+ self.future.enforce_limit(&mut self.by_hash);
+ }
+
+ // Will be used when mining merged
+ #[allow(dead_code)]
+ /// Returns top transactions from the queue ordered by priority.
+ pub fn top_transactions(&self, size: usize) -> Vec<SignedTransaction> {
+ self.current.by_priority
+ .iter()
+ .take(size)
+ .map(|t| self.by_hash.get(&t.hash).expect("Transaction Queue Inconsistency"))
+ .map(|t| t.transaction.clone())
+ .collect()
+ }
+
+ /// Returns hashes of all transactions from current, ordered by priority.
+ pub fn pending_hashes(&self) -> Vec<H256> {
+ self.current.by_priority
+ .iter()
+ .map(|t| t.hash)
+ .collect()
+ }
+
+ /// Removes all elements (in any state) from the queue
+ pub fn clear(&mut self) {
+ self.current.clear();
+ self.future.clear();
+ self.by_hash.clear();
+ self.last_nonces.clear();
+ }
+
+ /// Checks if there are any transactions in `future` that should actually be promoted to `current`
+ /// (because nonce matches).
+ fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) {
+ {
+ let by_nonce = self.future.by_address.row_mut(&address);
+ if let None = by_nonce {
+ return;
+ }
+ let mut by_nonce = by_nonce.unwrap();
+ while let Some(order) = by_nonce.remove(&current_nonce) {
+ // remove also from priority and hash
+ self.future.by_priority.remove(&order);
+ // Put to current
+ let order = order.update_height(current_nonce, first_nonce);
+ self.current.insert(address, current_nonce, order);
+ current_nonce = current_nonce + U256::one();
+ }
+ }
+ self.future.by_address.clear_if_empty(&address);
+ // Update last inserted nonce
+ self.last_nonces.insert(address, current_nonce - U256::one());
+ }
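+
+ // Worked example (sketch; nonces illustrative): suppose the state nonce for a
+ // sender is 10. add(nonce 12) -> future (gap at 11); add(nonce 10) -> current;
+ // add(nonce 11) -> current, and the queued nonce-12 transaction is promoted by
+ // `move_matching_future_to_current` as the gap closes.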
+
+ /// Adds VerifiedTransaction to this queue.
+ ///
+ /// Determines if it should be placed in current or future. When transaction is
+ /// imported to `current` also checks if there are any `future` transactions that should be promoted because of
+ /// this.
+ ///
+ /// It ignores transactions that have already been imported (same `hash`) and replaces the transaction
+ /// iff `(address, nonce)` is the same but `gas_price` is higher.
+ fn import_tx<T>(&mut self, tx: VerifiedTransaction, fetch_nonce: &T)
+ where T: Fn(&Address) -> U256 {
+
+ if self.by_hash.get(&tx.hash()).is_some() {
+ // Transaction is already imported.
+ trace!(target: "sync", "Dropping already imported transaction with hash: {:?}", tx.hash());
+ return;
+ }
+
+ let address = tx.sender();
+ let nonce = tx.nonce();
+
+ let state_nonce = fetch_nonce(&address);
+ let next_nonce = self.last_nonces
+ .get(&address)
+ .cloned()
+ .map_or(state_nonce, |n| n + U256::one());
+
+ // Check height
+ if nonce > next_nonce {
+ // We have a gap - put to future
+ Self::replace_transaction(tx, next_nonce, &mut self.future, &mut self.by_hash);
+ self.future.enforce_limit(&mut self.by_hash);
+ return;
+ } else if nonce < state_nonce {
+ // Dropping transaction
+ trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce);
+ return;
+ }
+
+ let base_nonce = fetch_nonce(&address);
+ Self::replace_transaction(tx, base_nonce, &mut self.current, &mut self.by_hash);
+ self.last_nonces.insert(address, nonce);
+ // But maybe there are some more items waiting in future?
+ self.move_matching_future_to_current(address, nonce + U256::one(), base_nonce);
+ self.current.enforce_limit(&mut self.by_hash);
+ }
+
+ /// Replaces transaction in given set (could be `future` or `current`).
+ ///
+ /// If there is already a transaction with the same `(sender, nonce)` it will be replaced iff `gas_price` is higher.
+ /// One of the transactions is dropped from the set and also removed from the queue entirely (from `by_hash`).
+ fn replace_transaction(tx: VerifiedTransaction, base_nonce: U256, set: &mut TransactionSet, by_hash: &mut HashMap<H256, VerifiedTransaction>) {
+ let order = TransactionOrder::for_transaction(&tx, base_nonce);
+ let hash = tx.hash();
+ let address = tx.sender();
+ let nonce = tx.nonce();
+
+ by_hash.insert(hash, tx);
+ if let Some(old) = set.insert(address, nonce, order.clone()) {
+ // There was already a transaction in the queue. Let's check which one should stay.
+ let old_fee = old.gas_price;
+ let new_fee = order.gas_price;
+ if old_fee.cmp(&new_fee) == Ordering::Greater {
+ // Put back old transaction since it has greater priority (higher gas_price)
+ set.by_address.insert(address, nonce, old);
+ // and remove new one
+ set.by_priority.remove(&order);
+ by_hash.remove(&hash);
+ } else {
+ // Make sure we remove old transaction entirely
+ set.by_priority.remove(&old);
+ by_hash.remove(&old.hash);
+ }
+ }
+ }
+}
+
+
+#[cfg(test)]
+mod test {
+ extern crate rustc_serialize;
+ use util::table::*;
+ use util::*;
+ use ethcore::transaction::*;
+ use super::*;
+ use super::{TransactionSet, TransactionOrder, VerifiedTransaction};
+
+ fn new_unsigned_tx(nonce: U256) -> Transaction {
+ Transaction {
+ action: Action::Create,
+ value: U256::from(100),
+ data: "3331600055".from_hex().unwrap(),
+ gas: U256::from(100_000),
+ gas_price: U256::one(),
+ nonce: nonce
+ }
+ }
+
+ fn new_tx() -> SignedTransaction {
+ let keypair = KeyPair::create().unwrap();
+ new_unsigned_tx(U256::from(123)).sign(&keypair.secret())
+ }
+
+ fn default_nonce(_address: &Address) -> U256 {
+ U256::from(123)
+ }
+
+ fn new_txs(second_nonce: U256) -> (SignedTransaction, SignedTransaction) {
+ let keypair = KeyPair::create().unwrap();
+ let secret = &keypair.secret();
+ let nonce = U256::from(123);
+ let tx = new_unsigned_tx(nonce);
+ let tx2 = new_unsigned_tx(nonce + second_nonce);
+
+ (tx.sign(secret), tx2.sign(secret))
+ }
+
+ #[test]
+ fn should_create_transaction_set() {
+ // given
+ let mut set = TransactionSet {
+ by_priority: BTreeSet::new(),
+ by_address: Table::new(),
+ limit: 1
+ };
+ let (tx1, tx2) = new_txs(U256::from(1));
+ let tx1 = VerifiedTransaction::new(tx1).unwrap();
+ let tx2 = VerifiedTransaction::new(tx2).unwrap();
+ let mut by_hash = {
+ let mut x = HashMap::new();
+ let tx1 = VerifiedTransaction::new(tx1.transaction.clone()).unwrap();
+ let tx2 = VerifiedTransaction::new(tx2.transaction.clone()).unwrap();
+ x.insert(tx1.hash(), tx1);
+ x.insert(tx2.hash(), tx2);
+ x
+ };
+ // Insert both transactions
+ let order1 = TransactionOrder::for_transaction(&tx1, U256::zero());
+ set.insert(tx1.sender(), tx1.nonce(), order1.clone());
+ let order2 = TransactionOrder::for_transaction(&tx2, U256::zero());
+ set.insert(tx2.sender(), tx2.nonce(), order2.clone());
+ assert_eq!(set.by_priority.len(), 2);
+ assert_eq!(set.by_address.len(), 2);
+
+ // when
+ set.enforce_limit(&mut by_hash);
+
+ // then
+ assert_eq!(by_hash.len(), 1);
+ assert_eq!(set.by_priority.len(), 1);
+ assert_eq!(set.by_address.len(), 1);
+ assert_eq!(set.by_priority.iter().next().unwrap().clone(), order1);
+ set.clear();
+ assert_eq!(set.by_priority.len(), 0);
+ assert_eq!(set.by_address.len(), 0);
+ }
+
+
+ #[test]
+ fn should_import_tx() {
+ // given
+ let mut txq = TransactionQueue::new();
+ let tx = new_tx();
+
+ // when
+ let res = txq.add(tx, &default_nonce);
+
+ // then
+ assert!(res.is_ok());
+ let stats = txq.status();
+ assert_eq!(stats.pending, 1);
+ }
+
+ #[test]
+ fn should_not_import_transaction_below_min_gas_price_threshold() {
+ // given
+ let mut txq = TransactionQueue::new();
+ let tx = new_tx();
+ txq.set_minimal_gas_price(tx.gas_price + U256::one());
+
+ // when
+ txq.add(tx, &default_nonce).unwrap_err();
+
+ // then
+ let stats = txq.status();
+ assert_eq!(stats.pending, 0);
+ assert_eq!(stats.future, 0);
+ }
+
+ #[test]
+ fn should_reject_incorrectly_signed_transaction() {
+ // given
+ let mut txq = TransactionQueue::new();
+ let tx = new_unsigned_tx(U256::from(123));
new_unsigned_tx(U256::from(123)); + let stx = { + let mut s = RlpStream::new_list(9); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + s.append_empty_data(); // action=create + s.append(&tx.value); + s.append(&tx.data); + s.append(&0u64); // v + s.append(&U256::zero()); // r + s.append(&U256::zero()); // s + decode(s.as_raw()) + }; + // when + let res = txq.add(stx, &default_nonce); + + // then + assert!(res.is_err()); + } + + #[test] + fn should_import_txs_from_same_sender() { + // given + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + + // when + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + + // then + let top = txq.top_transactions(5); + assert_eq!(top[0], tx); + assert_eq!(top[1], tx2); + assert_eq!(top.len(), 2); + } + + #[test] + fn should_return_pending_hashes() { + // given + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + + // when + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + + // then + let top = txq.pending_hashes(); + assert_eq!(top[0], tx.hash()); + assert_eq!(top[1], tx2.hash()); + assert_eq!(top.len(), 2); + } + + #[test] + fn should_put_transaction_to_futures_if_gap_detected() { + // given + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(2)); + + // when + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 1); + assert_eq!(stats.future, 1); + let top = txq.top_transactions(5); + assert_eq!(top.len(), 1); + assert_eq!(top[0], tx); + } + + #[test] + fn should_correctly_update_futures_when_removing() { + // given + let prev_nonce = |a: &Address| default_nonce(a) - U256::one(); + let next2_nonce = |a: &Address| default_nonce(a) + U256::from(2); + + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + txq.add(tx.clone(), &prev_nonce).unwrap(); + txq.add(tx2.clone(), &prev_nonce).unwrap(); + assert_eq!(txq.status().future, 2); + + // when + txq.remove(&tx.hash(), &next2_nonce); + // should remove both transactions since they are not valid + + // then + assert_eq!(txq.status().pending, 0); + assert_eq!(txq.status().future, 0); + } + + #[test] + fn should_move_transactions_if_gap_filled() { + // given + let mut txq = TransactionQueue::new(); + let kp = KeyPair::create().unwrap(); + let secret = kp.secret(); + let tx = new_unsigned_tx(U256::from(123)).sign(&secret); + let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret); + let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret); + + txq.add(tx, &default_nonce).unwrap(); + assert_eq!(txq.status().pending, 1); + txq.add(tx2, &default_nonce).unwrap(); + assert_eq!(txq.status().future, 1); + + // when + txq.add(tx1, &default_nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 3); + assert_eq!(stats.future, 0); + } + + #[test] + fn should_remove_transaction() { + // given + let mut txq2 = TransactionQueue::new(); + let (tx, tx2) = new_txs(U256::from(3)); + txq2.add(tx.clone(), &default_nonce).unwrap(); + txq2.add(tx2.clone(), &default_nonce).unwrap(); + assert_eq!(txq2.status().pending, 1); + assert_eq!(txq2.status().future, 1); + + // when + txq2.remove(&tx.hash(), &default_nonce); + txq2.remove(&tx2.hash(), &default_nonce); + + + // then + let stats = txq2.status(); + assert_eq!(stats.pending, 0); + assert_eq!(stats.future, 
0); + } + + #[test] + fn should_move_transactions_to_future_if_gap_introduced() { + // given + let mut txq = TransactionQueue::new(); + let (tx, tx2) = new_txs(U256::from(1)); + let tx3 = new_tx(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().future, 1); + txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().pending, 3); + + // when + txq.remove(&tx.hash(), &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 1); + assert_eq!(stats.pending, 1); + } + + #[test] + fn should_clear_queue() { + // given + let mut txq = TransactionQueue::new(); + let (tx, tx2) = new_txs(U256::one()); + + // add + txq.add(tx2.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); + let stats = txq.status(); + assert_eq!(stats.pending, 2); + + // when + txq.clear(); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 0); + } + + #[test] + fn should_drop_old_transactions_when_hitting_the_limit() { + // given + let mut txq = TransactionQueue::with_limits(1, 1); + let (tx, tx2) = new_txs(U256::one()); + txq.add(tx.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().pending, 1); + + // when + txq.add(tx2.clone(), &default_nonce).unwrap(); + + // then + let t = txq.top_transactions(2); + assert_eq!(txq.status().pending, 1); + assert_eq!(t.len(), 1); + assert_eq!(t[0], tx); + } + + #[test] + fn should_limit_future_transactions() { + let mut txq = TransactionQueue::with_limits(10, 1); + let (tx1, tx2) = new_txs(U256::from(4)); + let (tx3, tx4) = new_txs(U256::from(4)); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx3.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().pending, 2); + + // when + txq.add(tx2.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().future, 1); + txq.add(tx4.clone(), &default_nonce).unwrap(); + + // then + assert_eq!(txq.status().future, 1); + } + + #[test] + fn should_drop_transactions_with_old_nonces() { + let mut txq = TransactionQueue::new(); + let tx = new_tx(); + let last_nonce = tx.nonce + U256::one(); + let fetch_last_nonce = |_a: &Address| last_nonce; + + // when + txq.add(tx, &fetch_last_nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 0); + assert_eq!(stats.future, 0); + } + + #[test] + fn should_not_insert_same_transaction_twice() { + // given + let nonce = |a: &Address| default_nonce(a) + U256::one(); + let mut txq = TransactionQueue::new(); + let (_tx1, tx2) = new_txs(U256::from(1)); + txq.add(tx2.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().future, 1); + assert_eq!(txq.status().pending, 0); + + // when + txq.add(tx2.clone(), &nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 1); + assert_eq!(stats.pending, 0); + } + + #[test] + fn should_accept_same_transaction_twice_if_removed() { + // given + let mut txq = TransactionQueue::new(); + let (tx1, tx2) = new_txs(U256::from(1)); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().pending, 2); + + // when + txq.remove(&tx1.hash(), &default_nonce); + assert_eq!(txq.status().pending, 0); + assert_eq!(txq.status().future, 1); + txq.add(tx1.clone(), &default_nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 2); + } + + #[test] + fn should_not_move_to_future_if_state_nonce_is_higher() { + // given + let 
next_nonce = |a: &Address| default_nonce(a) + U256::one(); + let mut txq = TransactionQueue::new(); + let (tx, tx2) = new_txs(U256::from(1)); + let tx3 = new_tx(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().future, 1); + txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); + assert_eq!(txq.status().pending, 3); + + // when + txq.remove(&tx.hash(), &next_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 2); + } + + #[test] + fn should_replace_same_transaction_when_has_higher_fee() { + // given + let mut txq = TransactionQueue::new(); + let keypair = KeyPair::create().unwrap(); + let tx = new_unsigned_tx(U256::from(123)).sign(&keypair.secret()); + let tx2 = { + let mut tx2 = tx.deref().clone(); + tx2.gas_price = U256::from(200); + tx2.sign(&keypair.secret()) + }; + + // when + txq.add(tx, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 1); + assert_eq!(stats.future, 0); + assert_eq!(txq.top_transactions(1)[0].gas_price, U256::from(200)); + } + + #[test] + fn should_replace_same_transaction_when_importing_to_futures() { + // given + let mut txq = TransactionQueue::new(); + let keypair = KeyPair::create().unwrap(); + let tx0 = new_unsigned_tx(U256::from(123)).sign(&keypair.secret()); + let tx1 = { + let mut tx1 = tx0.deref().clone(); + tx1.nonce = U256::from(124); + tx1.sign(&keypair.secret()) + }; + let tx2 = { + let mut tx2 = tx1.deref().clone(); + tx2.gas_price = U256::from(200); + tx2.sign(&keypair.secret()) + }; + + // when + txq.add(tx1, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); + assert_eq!(txq.status().future, 1); + txq.add(tx0, &default_nonce).unwrap(); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 2); + assert_eq!(txq.top_transactions(2)[1].gas_price, U256::from(200)); + } + + #[test] + fn should_recalculate_height_when_removing_from_future() { + // given + let previous_nonce = |a: &Address| default_nonce(a) - U256::one(); + let next_nonce = |a: &Address| default_nonce(a) + U256::one(); + let mut txq = TransactionQueue::new(); + let (tx1, tx2) = new_txs(U256::one()); + txq.add(tx1.clone(), &previous_nonce).unwrap(); + txq.add(tx2, &previous_nonce).unwrap(); + assert_eq!(txq.status().future, 2); + + // when + txq.remove(&tx1.hash(), &next_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 1); + } +} diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..b8cc2a0f0 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -24,6 +24,7 @@ extern crate rustc_serialize; extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; +extern crate ethminer; #[macro_use] extern crate log as rlog; extern crate env_logger; @@ -32,26 +33,39 @@ extern crate fdlimit; extern crate daemonize; extern crate time; extern crate number_prefix; +extern crate rpassword; #[cfg(feature = "rpc")] extern crate ethcore_rpc as rpc; -use std::net::{SocketAddr}; +use std::net::{SocketAddr, IpAddr}; use std::env; use std::process::exit; use std::path::PathBuf; use env_logger::LogBuilder; use ctrlc::CtrlC; use util::*; -use util::panics::MayPanic; +use util::panics::{MayPanic, ForwardPanic, PanicHandler}; use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; -use ethsync::{EthSync, 
SyncConfig}; +use ethsync::{EthSync, SyncConfig, SyncProvider}; +use ethminer::{Miner, MinerService}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; +use util::keys::store::*; + +fn die_with_message(msg: &str) -> ! { + println!("ERROR: {}", msg); + exit(1); +} + +#[macro_export] +macro_rules! die { + ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*)))); +} const USAGE: &'static str = r#" Parity. Ethereum Client. @@ -59,38 +73,93 @@ Parity. Ethereum Client. Copyright 2015, 2016 Ethcore (UK) Limited Usage: - parity daemon [options] [ --no-bootstrap | ... ] - parity [options] [ --no-bootstrap | ... ] + parity daemon [options] + parity account (new | list) + parity [options] -Options: - --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file - or frontier, mainnet, morden, or testnet [default: frontier]. - --archive Client should not prune the state/storage trie. - -d --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] - --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] +Protocol Options: + --chain CHAIN Specify the blockchain type. CHAIN may be either a + JSON chain specification file or olympic, frontier, + homestead, mainnet, morden, or testnet + [default: homestead]. + -d --db-path PATH Specify the database & configuration directory path + [default: $HOME/.parity]. + --keys-path PATH Specify the path for JSON key files to be found + [default: $HOME/.web3/keys]. + --identity NAME Specify your node's name. - --no-bootstrap Don't bother trying to connect to any nodes initially. - --listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304]. - --public-address URL Specify the IP/port on which peers may connect. - --address URL Equivalent to --listen-address URL --public-address URL. +Networking Options: + --port PORT Override the port on which the node should listen + [default: 30303]. --peers NUM Try to maintain that many peers [default: 25]. + --nat METHOD Specify method to use for determining public + address. Must be one of: any, none, upnp, + extip:<IP> [default: any]. + --network-id INDEX Override the network identifier from the chain we + are on. + --bootnodes NODES Override the bootnodes from our chain. NODES should + be comma-delimited enodes. --no-discovery Disable new peer discovery. - --no-upnp Disable trying to figure out the correct public adderss over UPnP. - --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation. - - --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. - --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. - --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. + --node-key KEY Specify node secret key, either as 64-character hex + string or input to SHA3 operation. +API and Console Options: -j --jsonrpc Enable the JSON-RPC API server. - --jsonrpc-url URL Specify URL for JSON-RPC API server [default: 127.0.0.1:8545]. - --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. + --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API + server [default: 127.0.0.1]. + --jsonrpc-port PORT Specify the port portion of the JSONRPC API server + [default: 8545].
+ --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses + [default: null]. + --jsonrpc-apis APIS Specify the APIs available through the JSONRPC + interface. APIS is a comma-delimited list of API + names. Possible names are web3, eth, net and + personal. [default: web3,eth,net,personal]. - --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards - from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. - --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. +Sealing/Mining Options: + --gas-price WEI Minimum amount of Wei to be paid for a transaction + to be accepted for mining [default: 20000000000]. + --gas-floor-target GAS Amount of gas per block to target when sealing a new + block [default: 4712388]. + --author ADDRESS Specify the block author (aka "coinbase") address + for sending block rewards from sealed blocks + [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. + --extra-data STRING Specify a custom extra-data for authored blocks, no + more than 32 characters. - -l --logging LOGGING Specify the logging level. +Footprint Options: + --pruning METHOD Configure pruning of the state/storage trie. METHOD + may be one of: archive, basic (experimental), fast + (experimental) [default: archive]. + --cache-pref-size BYTES Specify the preferred size of the blockchain cache in + bytes [default: 16384]. + --cache-max-size BYTES Specify the maximum size of the blockchain cache in + bytes [default: 262144]. + --queue-max-size BYTES Specify the maximum size of memory to use for block + queue [default: 52428800]. + --cache MEGABYTES Set total amount of discretionary memory to use for + the entire system, overrides other cache and queue + options. + +Geth-compatibility Options: + --datadir PATH Equivalent to --db-path PATH. + --testnet Equivalent to --chain testnet. + --networkid INDEX Equivalent to --network-id INDEX. + --maxpeers COUNT Equivalent to --peers COUNT. + --nodekey KEY Equivalent to --node-key KEY. + --nodiscover Equivalent to --no-discovery. + --rpc Equivalent to --jsonrpc. + --rpcaddr HOST Equivalent to --jsonrpc-addr HOST. + --rpcport PORT Equivalent to --jsonrpc-port PORT. + --rpcapi APIS Equivalent to --jsonrpc-apis APIS. + --rpccorsdomain URL Equivalent to --jsonrpc-cors URL. + --gasprice WEI Equivalent to --gas-price WEI. + --etherbase ADDRESS Equivalent to --author ADDRESS. + --extradata STRING Equivalent to --extra-data STRING. + +Miscellaneous Options: + -l --logging LOGGING Specify the logging level. Must conform to the same + format as RUST_LOG. -v --version Show information about version. -h --help Show this screen. "#;
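As documented above, APIS is a single comma-delimited string; `setup_rpc_server` (below) splits it and dispatches per name, dying on anything unknown. A rough sketch of that split-and-validate step in isolation (`parse_apis` is an illustrative helper, and the `die!` error path is simplified to a plain panic):

```rust
// Illustrative sketch of validating a --jsonrpc-apis value such as
// "web3,eth,net,personal". Unknown names abort, mirroring the die! call
// in setup_rpc_server.
fn parse_apis(list: &str) -> Vec<&str> {
    const VALID: [&str; 4] = ["web3", "eth", "net", "personal"];
    list.split(',')
        .inspect(|api| {
            if !VALID.contains(api) {
                panic!("{}: Invalid API name to be enabled.", api);
            }
        })
        .collect()
}

fn main() {
    assert_eq!(parse_apis("web3,eth"), vec!["web3", "eth"]);
}
```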
"#; @@ -98,30 +167,52 @@ Options: #[derive(Debug, RustcDecodable)] struct Args { cmd_daemon: bool, + cmd_account: bool, + cmd_new: bool, + cmd_list: bool, arg_pid_file: String, - arg_enode: Vec, flag_chain: String, flag_db_path: String, + flag_identity: String, + flag_cache: Option, flag_keys_path: String, - flag_archive: bool, - flag_no_bootstrap: bool, - flag_listen_address: String, - flag_public_address: Option, - flag_address: Option, - flag_peers: u32, + flag_bootnodes: Option, + flag_network_id: Option, + flag_pruning: String, + flag_port: u16, + flag_peers: usize, flag_no_discovery: bool, - flag_no_upnp: bool, + flag_nat: String, flag_node_key: Option, flag_cache_pref_size: usize, flag_cache_max_size: usize, flag_queue_max_size: usize, flag_jsonrpc: bool, - flag_jsonrpc_url: String, + flag_jsonrpc_addr: String, + flag_jsonrpc_port: u16, flag_jsonrpc_cors: String, + flag_jsonrpc_apis: String, + flag_author: String, + flag_gas_price: String, + flag_gas_floor_target: String, + flag_extra_data: Option, flag_logging: Option, flag_version: bool, - flag_author: String, - flag_extra_data: Option, + // geth-compatibility... + flag_nodekey: Option, + flag_nodiscover: bool, + flag_maxpeers: Option, + flag_datadir: Option, + flag_extradata: Option, + flag_etherbase: Option, + flag_gasprice: Option, + flag_rpc: bool, + flag_rpcaddr: Option, + flag_rpcport: Option, + flag_rpccorsdomain: Option, + flag_rpcapi: Option, + flag_testnet: bool, + flag_networkid: Option, } fn setup_log(init: &Option) { @@ -151,19 +242,46 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) { +fn setup_rpc_server( + client: Arc, + sync: Arc, + secret_store: Arc, + miner: Arc, + url: &str, + cors_domain: &str, + apis: Vec<&str> +) -> Option> { use rpc::v1::*; - let mut server = rpc::HttpServer::new(1); - server.add_delegate(Web3Client::new().to_delegate()); - server.add_delegate(EthClient::new(&client, &sync).to_delegate()); - server.add_delegate(EthFilterClient::new(&client).to_delegate()); - server.add_delegate(NetClient::new(&sync).to_delegate()); - server.start_async(url, cors_domain); + let server = rpc::RpcServer::new(); + for api in apis.into_iter() { + match api { + "web3" => server.add_delegate(Web3Client::new().to_delegate()), + "net" => server.add_delegate(NetClient::new(&sync).to_delegate()), + "eth" => { + server.add_delegate(EthClient::new(&client, &sync, &secret_store, &miner).to_delegate()); + server.add_delegate(EthFilterClient::new(&client, &miner).to_delegate()); + } + "personal" => server.add_delegate(PersonalClient::new(&secret_store).to_delegate()), + _ => { + die!("{}: Invalid API name to be enabled.", api); + } + } + } + Some(server.start_http(url, cors_domain, 1)) } #[cfg(not(feature = "rpc"))] -fn setup_rpc_server(_client: Arc, _sync: Arc, _url: &str) { +fn setup_rpc_server( + _client: Arc, + _sync: Arc, + _secret_store: Arc, + _miner: Arc, + _url: &str, + _cors_domain: &str, + _apis: Vec<&str> +) -> Option> { + None } fn print_version() { @@ -179,16 +297,6 @@ By Wood/Paronyan/Kotewicz/DrwiÄ™ga/Volf.\ ", version()); } -fn die_with_message(msg: &str) -> ! { - println!("ERROR: {}", msg); - exit(1); -} - -#[macro_export] -macro_rules! 
@@ -201,15 +309,33 @@ impl Configuration { } fn path(&self) -> String { - self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) + let d = self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path); + d.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) } fn author(&self) -> Address { - Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) + let d = self.args.flag_etherbase.as_ref().unwrap_or(&self.args.flag_author); + Address::from_str(d).unwrap_or_else(|_| { + die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", d) + }) + } + + fn gas_floor_target(&self) -> U256 { + let d = &self.args.flag_gas_floor_target; + U256::from_dec_str(d).unwrap_or_else(|_| { + die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d) + }) + } + + fn gas_price(&self) -> U256 { + let d = self.args.flag_gasprice.as_ref().unwrap_or(&self.args.flag_gas_price); + U256::from_dec_str(d).unwrap_or_else(|_| { + die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d) + }) } fn extra_data(&self) -> Bytes { - match self.args.flag_extra_data { + match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) { Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(), None => version_data(), Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); } @@ -221,11 +347,16 @@ impl Configuration { } fn spec(&self) -> Spec { + if self.args.flag_testnet { + return ethereum::new_morden(); + } match self.args.flag_chain.as_ref() { - "frontier" | "mainnet" => ethereum::new_frontier(), + "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), "morden" | "testnet" => ethereum::new_morden(), "olympic" => ethereum::new_olympic(), - f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()), + f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| { + die!("{}: Couldn't read chain specification file.
Sure it exists?", f) + }).as_ref()), } } @@ -238,51 +369,78 @@ impl Configuration { } fn init_nodes(&self, spec: &Spec) -> Vec { - if self.args.flag_no_bootstrap { Vec::new() } else { - match self.args.arg_enode.len() { - 0 => spec.nodes().clone(), - _ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s))).collect(), - } + match self.args.flag_bootnodes { + Some(ref x) if x.len() > 0 => x.split(',').map(|s| { + Self::normalize_enode(s).unwrap_or_else(|| { + die!("{}: Invalid node address format given for a boot node.", s) + }) + }).collect(), + Some(_) => Vec::new(), + None => spec.nodes().clone(), } } #[cfg_attr(feature="dev", allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { - let mut listen_address = None; - let mut public_address = None; - - if let Some(ref a) = self.args.flag_address { - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --address", a))); - listen_address = public_address; - } - if listen_address.is_none() { - listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address))); - } - if let Some(ref a) = self.args.flag_public_address { - if public_address.is_some() { - die!("Conflicting flags provided: --address and --public-address"); - } - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --public-address", a))); - } + let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port)); + let public_address = if self.args.flag_nat.starts_with("extip:") { + let host = &self.args.flag_nat[6..]; + let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); + Some(SocketAddr::new(host, self.args.flag_port)) + } else { + listen_address + }; (listen_address, public_address) } fn net_settings(&self, spec: &Spec) -> NetworkConfiguration { let mut ret = NetworkConfiguration::new(); - ret.nat_enabled = !self.args.flag_no_upnp; + ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp"; ret.boot_nodes = self.init_nodes(spec); let (listen, public) = self.net_addresses(); ret.listen_address = listen; ret.public_address = public; ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3())); - ret.discovery_enabled = !self.args.flag_no_discovery; - ret.ideal_peers = self.args.flag_peers; + ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover; + ret.ideal_peers = self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32; let mut net_path = PathBuf::from(&self.path()); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); ret } + fn client_config(&self) -> ClientConfig { + let mut client_config = ClientConfig::default(); + match self.args.flag_cache { + Some(mb) => { + client_config.blockchain.max_cache_size = mb * 1024 * 1024; + client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size * 3 / 4; + } + None => { + client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; + client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + } + } + client_config.pruning = match self.args.flag_pruning.as_str() { + "archive" => 
journaldb::Algorithm::Archive, + "light" => journaldb::Algorithm::EarlyMerge, + "fast" => journaldb::Algorithm::OverlayRecent, + "basic" => journaldb::Algorithm::RefCounted, + _ => { die!("Invalid pruning method given."); } + }; + client_config.name = self.args.flag_identity.clone(); + client_config.queue.max_mem_use = self.args.flag_queue_max_size; + client_config + } + + fn sync_config(&self, spec: &Spec) -> SyncConfig { + let mut sync_config = SyncConfig::default(); + sync_config.network_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()).map_or(spec.network_id(), |id| { + U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --network-id/--networkid", id)) + }); + sync_config + } + fn execute(&self) { if self.args.flag_version { print_version(); @@ -295,10 +453,45 @@ impl Configuration { .start() .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); } + if self.args.cmd_account { + self.execute_account_cli(); + return; + } self.execute_client(); } + fn execute_account_cli(&self) { + use util::keys::store::SecretStore; + use rpassword::read_password; + let mut secret_store = SecretStore::new(); + if self.args.cmd_new { + println!("Please note that password is NOT RECOVERABLE."); + println!("Type password: "); + let password = read_password().unwrap(); + println!("Repeat password: "); + let password_repeat = read_password().unwrap(); + if password != password_repeat { + println!("Passwords do not match!"); + return; + } + println!("New account address:"); + let new_address = secret_store.new_account(&password).unwrap(); + println!("{:?}", new_address); + return; + } + if self.args.cmd_list { + println!("Known addresses:"); + for &(addr, _) in &secret_store.accounts().unwrap() { + println!("{:?}", addr); + } + } + } + + #[cfg_attr(feature="dev", allow(useless_format))] fn execute_client(&self) { + // Setup panic handler + let panic_handler = PanicHandler::new_in_arc(); + // Setup logging setup_log(&self.args.flag_logging); // Raise fdlimit @@ -306,50 +499,74 @@ impl Configuration { let spec = self.spec(); let net_settings = self.net_settings(&spec); - let mut sync_config = SyncConfig::default(); - sync_config.network_id = spec.network_id(); + let sync_config = self.sync_config(&spec); // Build client - let mut client_config = ClientConfig::default(); - client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; - client_config.prefer_journal = !self.args.flag_archive; - client_config.queue.max_mem_use = self.args.flag_queue_max_size; - let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); - let client = service.client().clone(); - client.set_author(self.author()); - client.set_extra_data(self.extra_data()); + let mut service = ClientService::start(self.client_config(), spec, net_settings, &Path::new(&self.path())).unwrap(); + panic_handler.forward_from(&service); + let client = service.client(); + + // Miner + let miner = Miner::new(); + miner.set_author(self.author()); + miner.set_gas_floor_target(self.gas_floor_target()); + miner.set_extra_data(self.extra_data()); + miner.set_minimal_gas_price(self.gas_price()); // Sync - let sync = EthSync::register(service.network(), sync_config, client); + let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone()); + + // Secret Store + let account_service = Arc::new(AccountService::new()); // Setup rpc - if 
self.args.flag_jsonrpc { - setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors); - SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); + if self.args.flag_jsonrpc || self.args.flag_rpc { + let url = format!("{}:{}", + self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), + self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) + ); + SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); + let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); + // TODO: use this as the API list. + let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); + let server_handler = setup_rpc_server( + service.client(), + sync.clone(), + account_service.clone(), + miner.clone(), + &url, + cors, + apis.split(',').collect() + ); + if let Some(handler) = server_handler { + panic_handler.forward_from(handler.deref()); + } } // Register IO handler let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), - sync: sync + sync: sync.clone(), + accounts: account_service.clone(), }); service.io().register_handler(io_handler).expect("Error registering IO handler"); // Handle exit - wait_for_exit(&service); + wait_for_exit(panic_handler); } } -fn wait_for_exit(client_service: &ClientService) { +fn wait_for_exit(panic_handler: Arc) { let exit = Arc::new(Condvar::new()); // Handle possible exits let e = exit.clone(); CtrlC::set_handler(move || { e.notify_all(); }); + + // Handle panics let e = exit.clone(); - client_service.on_panic(move |_reason| { e.notify_all(); }); + panic_handler.on_panic(move |_reason| { e.notify_all(); }); // Wait for signal let mutex = Mutex::new(()); @@ -391,10 +608,16 @@ impl Informant { let chain_info = client.chain_info(); let queue_info = client.queue_info(); let cache_info = client.blockchain_cache_info(); - let report = client.report(); let sync_info = sync.status(); - if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { + let mut write_report = self.report.write().unwrap(); + let report = client.report(); + + if let (_, _, &Some(ref last_report)) = ( + self.chain_info.read().unwrap().deref(), + self.cache_info.read().unwrap().deref(), + write_report.deref() + ) { println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, @@ -417,26 +640,34 @@ impl Informant { *self.chain_info.write().unwrap().deref_mut() = Some(chain_info); *self.cache_info.write().unwrap().deref_mut() = Some(cache_info); - *self.report.write().unwrap().deref_mut() = Some(report); + *write_report.deref_mut() = Some(report); } } const INFO_TIMER: TimerToken = 0; +const ACCOUNT_TICK_TIMER: TimerToken = 10; +const ACCOUNT_TICK_MS: u64 = 60000; + struct ClientIoHandler { client: Arc, sync: Arc, + accounts: Arc, info: Informant, } impl IoHandler for ClientIoHandler { fn initialize(&self, io: &IoContext) { io.register_timer(INFO_TIMER, 5000).expect("Error registering timer"); + io.register_timer(ACCOUNT_TICK_TIMER, ACCOUNT_TICK_MS).expect("Error registering account timer"); + } fn timeout(&self, _io: &IoContext, 
timer: TimerToken) { - if INFO_TIMER == timer { - self.info.tick(&self.client, &self.sync); + match timer { + INFO_TIMER => { self.info.tick(&self.client, &self.sync); } + ACCOUNT_TICK_TIMER => { self.accounts.tick(); }, + _ => {} } } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..88b69e82c 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Ethcore jsonrpc" name = "ethcore-rpc" -version = "0.9.99" +version = "1.1.0" license = "GPL-3.0" authors = ["Ethcore . + #[cfg(not(feature = "serde_macros"))] mod inner { extern crate syntex; diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 0653a0c33..3096a45c9 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(feature="nightly", feature(custom_derive, custom_attribute, plugin))] #![cfg_attr(feature="nightly", plugin(serde_macros, clippy))] +#[macro_use] +extern crate log; extern crate rustc_serialize; extern crate serde; extern crate serde_json; @@ -27,35 +29,46 @@ extern crate jsonrpc_http_server; extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; +extern crate ethminer; extern crate transient_hashmap; +use std::sync::Arc; +use std::thread; +use util::panics::PanicHandler; use self::jsonrpc_core::{IoHandler, IoDelegate}; pub mod v1; /// Http server. -pub struct HttpServer { - handler: IoHandler, - threads: usize +pub struct RpcServer { + handler: Arc, } -impl HttpServer { +impl RpcServer { /// Construct new http server object with given number of threads. - pub fn new(threads: usize) -> HttpServer { - HttpServer { - handler: IoHandler::new(), - threads: threads + pub fn new() -> RpcServer { + RpcServer { + handler: Arc::new(IoHandler::new()), } } /// Add io delegate. - pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { + pub fn add_delegate(&self, delegate: IoDelegate) where D: Send + Sync + 'static { self.handler.add_delegate(delegate); } - /// Start server asynchronously in new thread - pub fn start_async(self, addr: &str, cors_domain: &str) { - let server = jsonrpc_http_server::Server::new(self.handler, self.threads); - server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned())) + /// Start server asynchronously in new thread and returns panic handler. + pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc { + let addr = addr.to_owned(); + let cors_domain = cors_domain.to_owned(); + let panic_handler = PanicHandler::new_in_arc(); + let ph = panic_handler.clone(); + let server = jsonrpc_http_server::Server::new(self.handler.clone()); + thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || { + ph.catch_panic(move || { + server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads); + }).unwrap() + }).expect("Error while creating jsonrpc http thread"); + panic_handler } } diff --git a/rpc/src/v1/helpers/external_miner.rs b/rpc/src/v1/helpers/external_miner.rs new file mode 100644 index 000000000..4cbda8928 --- /dev/null +++ b/rpc/src/v1/helpers/external_miner.rs @@ -0,0 +1,59 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see <http://www.gnu.org/licenses/>. + +use std::collections::HashMap; +use std::sync::RwLock; +use util::numbers::U256; +use util::hash::H256; + +/// External miner interface. +pub trait ExternalMinerService: Send + Sync { + /// Submit hashrate for given miner. + fn submit_hashrate(&self, hashrate: U256, id: H256); + + /// Total hashrate. + fn hashrate(&self) -> U256; + + /// Returns true if external miner is mining. + fn is_mining(&self) -> bool; +} + +/// External Miner. +pub struct ExternalMiner { + hashrates: RwLock<HashMap<H256, U256>>, +} + +impl Default for ExternalMiner { + fn default() -> Self { + ExternalMiner { + hashrates: RwLock::new(HashMap::new()), + } + } +} + +impl ExternalMinerService for ExternalMiner { + fn submit_hashrate(&self, hashrate: U256, id: H256) { + self.hashrates.write().unwrap().insert(id, hashrate); + } + + fn hashrate(&self) -> U256 { + self.hashrates.read().unwrap().iter().fold(U256::from(0), |sum, (_, v)| sum + *v) + } + + fn is_mining(&self) -> bool { + !self.hashrates.read().unwrap().is_empty() + } +} diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index b1a5c05ba..8c574cff6 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -16,6 +16,8 @@ mod poll_manager; mod poll_filter; +pub mod external_miner; pub use self::poll_manager::PollManager; pub use self::poll_filter::PollFilter; +pub use self::external_miner::{ExternalMinerService, ExternalMiner}; diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 465290270..f9ed6230c 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -1,10 +1,13 @@ //! Helper type with all filter possibilities. +use util::hash::H256; use ethcore::filter::Filter; +pub type BlockNumber = u64; + #[derive(Clone)] pub enum PollFilter { - Block, - PendingTransaction, - Logs(Filter) + Block(BlockNumber), + PendingTransaction(Vec<H256>), + Logs(BlockNumber, Filter) } diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 36a6352c2..9735d7d5d 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -22,28 +22,13 @@ use transient_hashmap::{TransientHashMap, Timer, StandardTimer}; const POLL_LIFETIME: u64 = 60; pub type PollId = usize; -pub type BlockNumber = u64; - -pub struct PollInfo<F> { - pub filter: F, - pub block_number: BlockNumber -} - -impl<F> Clone for PollInfo<F> where F: Clone { - fn clone(&self) -> Self { - PollInfo { - filter: self.filter.clone(), - block_number: self.block_number.clone() - } - } -} /// Indexes all poll requests. /// /// Lazily garbage collects unused polls info. pub struct PollManager<F, T = StandardTimer> where T: Timer { - polls: TransientHashMap<PollInfo<F>, T>, - next_available_id: PollId + polls: TransientHashMap<F, T>, + next_available_id: PollId, } impl<F> PollManager<F, StandardTimer> { @@ -54,6 +39,7 @@ impl<F, T> PollManager<F, T> where T: Timer { + pub fn new_with_timer(timer: T) -> Self { PollManager { polls: TransientHashMap::new_with_timer(POLL_LIFETIME, timer), @@ -64,31 +50,30 @@ impl<F, T> PollManager<F, T> where T: Timer { /// Returns id which can be used for new poll. /// /// Stores information when last poll happened.
- pub fn create_poll(&mut self, filter: F, block: BlockNumber) -> PollId { + pub fn create_poll(&mut self, filter: F) -> PollId { self.polls.prune(); + let id = self.next_available_id; + self.polls.insert(id, filter); + self.next_available_id += 1; - self.polls.insert(id, PollInfo { - filter: filter, - block_number: block, - }); id } - /// Updates information when last poll happend. - pub fn update_poll(&mut self, id: &PollId, block: BlockNumber) { - self.polls.prune(); - if let Some(info) = self.polls.get_mut(id) { - info.block_number = block; - } - } - - /// Returns number of block when last poll happend. - pub fn get_poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { + // Implementation is always using `poll_mut` + #[cfg(test)] + /// Get a reference to stored poll filter + pub fn poll(&mut self, id: &PollId) -> Option<&F> { self.polls.prune(); self.polls.get(id) } + /// Get a mutable reference to stored poll filter + pub fn poll_mut(&mut self, id: &PollId) -> Option<&mut F> { + self.polls.prune(); + self.polls.get_mut(id) + } + /// Removes poll info. pub fn remove_poll(&mut self, id: &PollId) { self.polls.remove(id); @@ -97,48 +82,46 @@ impl PollManager where T: Timer { #[cfg(test)] mod tests { - use std::cell::RefCell; + use std::cell::Cell; use transient_hashmap::Timer; use v1::helpers::PollManager; struct TestTimer<'a> { - time: &'a RefCell, + time: &'a Cell, } impl<'a> Timer for TestTimer<'a> { fn get_time(&self) -> i64 { - *self.time.borrow() + self.time.get() } } #[test] fn test_poll_indexer() { - let time = RefCell::new(0); + let time = Cell::new(0); let timer = TestTimer { time: &time, }; let mut indexer = PollManager::new_with_timer(timer); - assert_eq!(indexer.create_poll(false, 20), 0); - assert_eq!(indexer.create_poll(true, 20), 1); + assert_eq!(indexer.create_poll(20), 0); + assert_eq!(indexer.create_poll(20), 1); - *time.borrow_mut() = 10; - indexer.update_poll(&0, 21); - assert_eq!(indexer.get_poll_info(&0).unwrap().filter, false); - assert_eq!(indexer.get_poll_info(&0).unwrap().block_number, 21); + time.set(10); + *indexer.poll_mut(&0).unwrap() = 21; + assert_eq!(*indexer.poll(&0).unwrap(), 21); + assert_eq!(*indexer.poll(&1).unwrap(), 20); - *time.borrow_mut() = 30; - indexer.update_poll(&1, 23); - assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23); + time.set(30); + *indexer.poll_mut(&1).unwrap() = 23; + assert_eq!(*indexer.poll(&1).unwrap(), 23); - *time.borrow_mut() = 75; - indexer.update_poll(&0, 30); - assert!(indexer.get_poll_info(&0).is_none()); - assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23); + time.set(75); + assert!(indexer.poll(&0).is_none()); + assert_eq!(*indexer.poll(&1).unwrap(), 23); indexer.remove_poll(&1); - assert!(indexer.get_poll_info(&1).is_none()); + assert!(indexer.poll(&1).is_none()); } + } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7113c55b1..fda391304 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -15,37 +15,69 @@ // along with Parity. If not, see . //! Eth rpc implementation. 
-use std::collections::HashMap; -use std::sync::{Arc, Weak, Mutex, RwLock}; -use ethsync::{EthSync, SyncState}; +use std::collections::HashSet; +use std::sync::{Arc, Weak, Mutex}; +use std::ops::Deref; +use ethsync::{SyncProvider, SyncState}; +use ethminer::{MinerService}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; use util::rlp::encode; use ethcore::client::*; -use ethcore::block::{IsBlock}; +use ethcore::block::IsBlock; use ethcore::views::*; -//#[macro_use] extern crate log; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; +use ethcore::transaction::Transaction as EthTransaction; use v1::traits::{Eth, EthFilter}; -use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log}; -use v1::helpers::{PollFilter, PollManager}; +use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log}; +use v1::helpers::{PollFilter, PollManager, ExternalMinerService, ExternalMiner}; +use util::keys::store::AccountProvider; /// Eth rpc implementation. -pub struct EthClient { - client: Weak, - sync: Weak, - hashrates: RwLock>, +pub struct EthClient + where C: BlockChainClient, + S: SyncProvider, + A: AccountProvider, + M: MinerService, + EM: ExternalMinerService { + client: Weak, + sync: Weak, + accounts: Weak, + miner: Weak, + external_miner: EM, } -impl EthClient { +impl EthClient + where C: BlockChainClient, + S: SyncProvider, + A: AccountProvider, + M: MinerService { + /// Creates new EthClient. - pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc, accounts: &Arc, miner: &Arc) -> Self { + EthClient::new_with_external_miner(client, sync, accounts, miner, ExternalMiner::default()) + } +} + + +impl EthClient + where C: BlockChainClient, + S: SyncProvider, + A: AccountProvider, + M: MinerService, + EM: ExternalMinerService { + + /// Creates new EthClient with custom external miner. + pub fn new_with_external_miner(client: &Arc, sync: &Arc, accounts: &Arc, miner: &Arc, em: EM) + -> EthClient { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), - hashrates: RwLock::new(HashMap::new()), + miner: Arc::downgrade(miner), + accounts: Arc::downgrade(accounts), + external_miner: em, } } @@ -93,12 +125,23 @@ impl EthClient { None => Ok(Value::Null) } } + + fn uncle(&self, _block: BlockId, _index: usize) -> Result { + // TODO: implement! + Ok(Value::Null) + } } -impl Eth for EthClient { +impl Eth for EthClient + where C: BlockChainClient + 'static, + S: SyncProvider + 'static, + A: AccountProvider + 'static, + M: MinerService + 'static, + EM: ExternalMinerService + 'static { + fn protocol_version(&self, params: Params) -> Result { match params { - Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), + Params::None => Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())), _ => Err(Error::invalid_params()) } } @@ -132,7 +175,7 @@ impl Eth for EthClient { // TODO: return real value of mining once it's implemented. 
fn is_mining(&self, params: Params) -> Result { match params { - Params::None => to_value(&!self.hashrates.read().unwrap().is_empty()), + Params::None => to_value(&self.external_miner.is_mining()), _ => Err(Error::invalid_params()) } } @@ -140,7 +183,7 @@ impl Eth for EthClient { // TODO: return real hashrate once we have mining fn hashrate(&self, params: Params) -> Result { match params { - Params::None => to_value(&self.hashrates.read().unwrap().iter().fold(0u64, |sum, (_, v)| sum + v)), + Params::None => to_value(&self.external_miner.hashrate()), _ => Err(Error::invalid_params()) } } @@ -152,6 +195,14 @@ impl Eth for EthClient { } } + fn accounts(&self, _: Params) -> Result { + let store = take_weak!(self.accounts); + match store.accounts() { + Ok(account_list) => to_value(&account_list), + Err(_) => Err(Error::internal_error()) + } + } + fn block_number(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.client).chain_info().best_block_number)), @@ -159,26 +210,59 @@ impl Eth for EthClient { } } - fn block_transaction_count(&self, params: Params) -> Result { + fn balance(&self, params: Params) -> Result { + from_params::<(Address, BlockNumber)>(params) + .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).balance(&address))) + } + + fn storage_at(&self, params: Params) -> Result { + from_params::<(Address, U256, BlockNumber)>(params) + .and_then(|(address, position, _block_number)| + to_value(&U256::from(take_weak!(self.client).storage_at(&address, &H256::from(position))))) + } + + fn transaction_count(&self, params: Params) -> Result { + from_params::<(Address, BlockNumber)>(params) + .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).nonce(&address))) + } + + fn block_transaction_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) - .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { - Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), - None => Ok(Value::Null) + .and_then(|(hash,)| // match + to_value(&take_weak!(self.client).block(BlockId::Hash(hash)) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).transactions_count())))) + } + + fn block_transaction_count_by_number(&self, params: Params) -> Result { + from_params::<(BlockNumber,)>(params) + .and_then(|(block_number,)| match block_number { + BlockNumber::Pending => to_value(&U256::from(take_weak!(self.miner).status().transaction_queue_pending)), + _ => to_value(&take_weak!(self.client).block(block_number.into()) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).transactions_count()))) }) } - fn block_uncles_count(&self, params: Params) -> Result { + fn block_uncles_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) - .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { - Some(bytes) => to_value(&BlockView::new(&bytes).uncles_count()), - None => Ok(Value::Null) + .and_then(|(hash,)| + to_value(&take_weak!(self.client).block(BlockId::Hash(hash)) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).uncles_count())))) + } + + fn block_uncles_count_by_number(&self, params: Params) -> Result { + from_params::<(BlockNumber,)>(params) + .and_then(|(block_number,)| match block_number { + BlockNumber::Pending => to_value(&U256::from(0)), + _ => to_value(&take_weak!(self.client).block(block_number.into()) + .map_or_else(U256::zero, |bytes| 
U256::from(BlockView::new(&bytes).uncles_count()))) }) } // TODO: do not ignore block number param fn code_at(&self, params: Params) -> Result { from_params::<(Address, BlockNumber)>(params) - .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new))) + .and_then(|(address, _block_number)| + to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new))) } fn block_by_hash(&self, params: Params) -> Result { @@ -206,6 +290,23 @@ impl Eth for EthClient { .and_then(|(number, index)| self.transaction(TransactionId::Location(number.into(), index.value()))) } + fn uncle_by_block_hash_and_index(&self, params: Params) -> Result { + from_params::<(H256, Index)>(params) + .and_then(|(hash, index)| self.uncle(BlockId::Hash(hash), index.value())) + } + + fn uncle_by_block_number_and_index(&self, params: Params) -> Result { + from_params::<(BlockNumber, Index)>(params) + .and_then(|(number, index)| self.uncle(number.into(), index.value())) + } + + fn compilers(&self, params: Params) -> Result { + match params { + Params::None => to_value(&vec![] as &Vec), + _ => Err(Error::invalid_params()) + } + } + fn logs(&self, params: Params) -> Result { from_params::<(Filter,)>(params) .and_then(|(filter,)| { @@ -220,8 +321,9 @@ impl Eth for EthClient { fn work(&self, params: Params) -> Result { match params { Params::None => { - let c = take_weak!(self.client); - let u = c.sealing_block().lock().unwrap(); + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + let u = miner.sealing_block(client.deref()).lock().unwrap(); match *u { Some(ref b) => { let pow_hash = b.hash(); @@ -239,44 +341,84 @@ impl Eth for EthClient { fn submit_work(&self, params: Params) -> Result { from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| { // trace!("Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); - let c = take_weak!(self.client); + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); let seal = vec![encode(&mix_hash).to_vec(), encode(&nonce).to_vec()]; - let r = c.submit_seal(pow_hash, seal); + let r = miner.submit_seal(client.deref(), pow_hash, seal); to_value(&r.is_ok()) }) } fn submit_hashrate(&self, params: Params) -> Result { // TODO: Index should be U256. - from_params::<(Index, H256)>(params).and_then(|(rate, id)| { - self.hashrates.write().unwrap().insert(id, rate.value() as u64); + from_params::<(U256, H256)>(params).and_then(|(rate, id)| { + self.external_miner.submit_hashrate(rate, id); to_value(&true) }) } + + fn send_transaction(&self, params: Params) -> Result { + from_params::<(TransactionRequest, )>(params) + .and_then(|(transaction_request, )| { + let accounts = take_weak!(self.accounts); + match accounts.account_secret(&transaction_request.from) { + Ok(secret) => { + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + + let transaction: EthTransaction = transaction_request.into(); + let signed_transaction = transaction.sign(&secret); + let hash = signed_transaction.hash(); + + let import = miner.import_transactions(vec![signed_transaction], |a: &Address| client.nonce(a)); + match import { + Ok(_) => to_value(&hash), + Err(e) => { + warn!("Error sending transaction: {:?}", e); + to_value(&U256::zero()) + } + } + }, + Err(_) => { to_value(&U256::zero()) } + } + }) + } } /// Eth filter rpc implementation. 
-pub struct EthFilterClient { - client: Weak, +pub struct EthFilterClient + where C: BlockChainClient, + M: MinerService { + + client: Weak, + miner: Weak, polls: Mutex>, } -impl EthFilterClient { +impl EthFilterClient + where C: BlockChainClient, + M: MinerService { + /// Creates new Eth filter client. - pub fn new(client: &Arc) -> Self { + pub fn new(client: &Arc, miner: &Arc) -> Self { EthFilterClient { client: Arc::downgrade(client), - polls: Mutex::new(PollManager::new()) + miner: Arc::downgrade(miner), + polls: Mutex::new(PollManager::new()), } } } -impl EthFilter for EthFilterClient { +impl EthFilter for EthFilterClient + where C: BlockChainClient + 'static, + M: MinerService + 'static { + fn new_filter(&self, params: Params) -> Result { from_params::<(Filter,)>(params) .and_then(|(filter,)| { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::Logs(filter.into()), take_weak!(self.client).chain_info().best_block_number); + let block_number = take_weak!(self.client).chain_info().best_block_number; + let id = polls.create_poll(PollFilter::Logs(block_number, filter.into())); to_value(&U256::from(id)) }) } @@ -285,7 +427,7 @@ impl EthFilter for EthFilterClient { match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::Block, take_weak!(self.client).chain_info().best_block_number); + let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); to_value(&U256::from(id)) }, _ => Err(Error::invalid_params()) @@ -296,7 +438,9 @@ impl EthFilter for EthFilterClient { match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::PendingTransaction, take_weak!(self.client).chain_info().best_block_number); + let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); + let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); + to_value(&U256::from(id)) }, _ => Err(Error::invalid_params()) @@ -307,37 +451,47 @@ impl EthFilter for EthFilterClient { let client = take_weak!(self.client); from_params::<(Index,)>(params) .and_then(|(index,)| { - let info = self.polls.lock().unwrap().get_poll_info(&index.value()).cloned(); - match info { + let mut polls = self.polls.lock().unwrap(); + match polls.poll_mut(&index.value()) { None => Ok(Value::Array(vec![] as Vec)), - Some(info) => match info.filter { - PollFilter::Block => { + Some(filter) => match *filter { + PollFilter::Block(ref mut block_number) => { // + 1, cause we want to return hashes including current block hash. 
let current_number = client.chain_info().best_block_number + 1; - let hashes = (info.block_number..current_number).into_iter() + let hashes = (*block_number..current_number).into_iter() .map(BlockId::Number) .filter_map(|id| client.block_hash(id)) .collect::<Vec<H256>>(); - self.polls.lock().unwrap().update_poll(&index.value(), current_number); + *block_number = current_number; to_value(&hashes) }, - PollFilter::PendingTransaction => { - // TODO: fix implementation once TransactionQueue is merged - to_value(&vec![] as &Vec<H256>) + PollFilter::PendingTransaction(ref mut previous_hashes) => { + let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); + // calculate diff: hashes that are in the current set but were not seen before + let previous_hashes_set = previous_hashes.into_iter().map(|h| h.clone()).collect::<HashSet<H256>>(); + let diff = current_hashes + .iter() + .filter(|hash| !previous_hashes_set.contains(&hash)) + .cloned() + .collect::<Vec<H256>>(); + + *previous_hashes = current_hashes; + + to_value(&diff) }, - PollFilter::Logs(mut filter) => { - filter.from_block = BlockId::Number(info.block_number); + PollFilter::Logs(ref mut block_number, ref mut filter) => { + filter.from_block = BlockId::Number(*block_number); filter.to_block = BlockId::Latest; - let logs = client.logs(filter) + let logs = client.logs(filter.clone()) .into_iter() .map(From::from) .collect::<Vec<Log>>(); let current_number = client.chain_info().best_block_number; - self.polls.lock().unwrap().update_poll(&index.value(), current_number); + *block_number = current_number; to_value(&logs) } } diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 9e24caad2..e52fc0bd4 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -17,29 +17,34 @@ //! Net rpc implementation. use std::sync::{Arc, Weak}; use jsonrpc_core::*; -use ethsync::EthSync; +use ethsync::SyncProvider; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient { - sync: Weak<EthSync> +pub struct NetClient<S> where S: SyncProvider { + sync: Weak<S> } -impl NetClient { +impl<S> NetClient<S> where S: SyncProvider { /// Creates new NetClient. - pub fn new(sync: &Arc<EthSync>) -> Self { + pub fn new(sync: &Arc<S>) -> Self { NetClient { sync: Arc::downgrade(sync) } } } -impl Net for NetClient { +impl<S> Net for NetClient<S> where S: SyncProvider + 'static { fn version(&self, _: Params) -> Result<Value, Error> { - Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) + Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())) } fn peer_count(&self, _params: Params) -> Result<Value, Error> { - Ok(Value::U64(take_weak!(self.sync).status().num_peers as u64)) + Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned())) + } + + fn is_listening(&self, _: Params) -> Result<Value, Error> { + // right now (11 March 2016), we are always listening for incoming connections + Ok(Value::Bool(true)) + } }
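One detail of the net.rs change above deserves a note: `net_version` and `net_peerCount` now answer with JSON strings instead of `Value::U64`, the peer count as a 0x-prefixed hex quantity, which is how Ethereum JSON-RPC clients conventionally expect numeric results to be encoded. A one-liner sketch of that encoding (the helper name is illustrative):

```rust
// Illustrative sketch of the 0x-prefixed hex encoding used by the patched
// net_peerCount, in place of a raw JSON integer.
fn to_quantity(n: u64) -> String {
    format!("0x{:x}", n)
}

fn main() {
    assert_eq!(to_quantity(120), "0x78"); // e.g. 120 peers
    assert_eq!(to_quantity(0), "0x0");
}
```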
-pub struct PersonalClient { - secret_store: Weak>, +pub struct PersonalClient where A: AccountProvider { + accounts: Weak, } -impl PersonalClient { +impl PersonalClient where A: AccountProvider { /// Creates new PersonalClient - pub fn new(store: &Arc>) -> Self { + pub fn new(store: &Arc) -> Self { PersonalClient { - secret_store: Arc::downgrade(store), + accounts: Arc::downgrade(store), } } } -impl Personal for PersonalClient { +impl Personal for PersonalClient where A: AccountProvider + 'static { fn accounts(&self, _: Params) -> Result { - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let store = take_weak!(self.accounts); match store.accounts() { - Ok(account_list) => { - Ok(Value::Array(account_list.iter() - .map(|&(account, _)| Value::String(format!("{:?}", account))) - .collect::>()) - ) - } + Ok(account_list) => to_value(&account_list), Err(_) => Err(Error::internal_error()) } } @@ -54,10 +47,9 @@ impl Personal for PersonalClient { fn new_account(&self, params: Params) -> Result { from_params::<(String, )>(params).and_then( |(pass, )| { - let store_wk = take_weak!(self.secret_store); - let mut store = store_wk.write().unwrap(); + let store = take_weak!(self.accounts); match store.new_account(&pass) { - Ok(address) => Ok(Value::String(format!("{:?}", address))), + Ok(address) => Ok(Value::String(format!("0x{:?}", address))), Err(_) => Err(Error::internal_error()) } } @@ -67,8 +59,7 @@ impl Personal for PersonalClient { fn unlock_account(&self, params: Params) -> Result { from_params::<(Address, String, u64)>(params).and_then( |(account, account_pass, _)|{ - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let store = take_weak!(self.accounts); match store.unlock_account(&account, &account_pass) { Ok(_) => Ok(Value::Bool(true)), Err(_) => Ok(Value::Bool(false)), diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 104a8b3f0..b82a20e89 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -21,9 +21,10 @@ pub mod traits; mod impls; mod types; +mod helpers; + #[cfg(test)] mod tests; -mod helpers; pub use self::traits::{Web3, Eth, EthFilter, Personal, Net}; pub use self::impls::*; diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs new file mode 100644 index 000000000..6bc929709 --- /dev/null +++ b/rpc/src/v1/tests/eth.rs @@ -0,0 +1,350 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
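The personal.rs change above completes the same refactor applied to net.rs and the filter client: each RPC implementation is now generic over a provider trait and holds a `Weak` handle instead of a concrete `Arc<RwLock<...>>`. A minimal self-contained sketch of the pattern follows; the names `AccountProvider` and the `accounts` method here are simplified stand-ins, not the exact Parity signatures:

```rust
use std::sync::{Arc, Weak};

// Simplified stand-in for util::keys::store::AccountProvider.
trait AccountProvider: Send + Sync {
    fn accounts(&self) -> Vec<String>;
}

// Generic over the provider, as in the diff. Interior mutability lives in
// the provider itself, so the RwLock wrapper around the store disappears,
// and Weak keeps the RPC layer from extending the store's lifetime.
struct PersonalClient<A: AccountProvider> {
    accounts: Weak<A>,
}

impl<A: AccountProvider> PersonalClient<A> {
    fn new(store: &Arc<A>) -> Self {
        PersonalClient { accounts: Arc::downgrade(store) }
    }

    // The real code performs this upgrade via the take_weak! macro and
    // answers with a JSON-RPC internal error if the provider is gone.
    fn accounts(&self) -> Option<Vec<String>> {
        self.accounts.upgrade().map(|store| store.accounts())
    }
}

fn main() {
    struct OneAccount;
    impl AccountProvider for OneAccount {
        fn accounts(&self) -> Vec<String> {
            vec!["0x0000000000000000000000000000000000000001".to_owned()]
        }
    }

    let store = Arc::new(OneAccount);
    let client = PersonalClient::new(&store);
    assert_eq!(client.accounts().unwrap().len(), 1);

    drop(store);
    assert!(client.accounts().is_none()); // provider dropped: upgrade fails
}
```

This is also why the test helpers introduced below can plug `TestAccountProvider`, `TestSyncProvider`, and `TestMinerService` into the same clients without touching the implementations under test.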
+ +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; +use jsonrpc_core::IoHandler; +use util::hash::{Address, H256}; +use util::numbers::U256; +use ethcore::client::{TestBlockChainClient, EachBlockWith}; +use v1::{Eth, EthClient}; +use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService, TestExternalMiner}; + +fn blockchain_client() -> Arc { + let client = TestBlockChainClient::new(); + Arc::new(client) +} + +fn accounts_provider() -> Arc { + let mut accounts = HashMap::new(); + accounts.insert(Address::from(1), TestAccount::new("test")); + let ap = TestAccountProvider::new(accounts); + Arc::new(ap) +} + +fn sync_provider() -> Arc { + Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })) +} + +fn miner_service() -> Arc { + Arc::new(TestMinerService) +} + +struct EthTester { + client: Arc, + _sync: Arc, + _accounts_provider: Arc, + _miner: Arc, + hashrates: Arc>>, + pub io: IoHandler, +} + +impl Default for EthTester { + fn default() -> Self { + let client = blockchain_client(); + let sync = sync_provider(); + let ap = accounts_provider(); + let miner = miner_service(); + let hashrates = Arc::new(RwLock::new(HashMap::new())); + let external_miner = TestExternalMiner::new(hashrates.clone()); + let eth = EthClient::new_with_external_miner(&client, &sync, &ap, &miner, external_miner).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(eth); + EthTester { + client: client, + _sync: sync, + _accounts_provider: ap, + _miner: miner, + io: io, + hashrates: hashrates, + } + } +} + +#[test] +fn rpc_eth_protocol_version() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_protocolVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +#[ignore] +fn rpc_eth_syncing() { + unimplemented!() +} + +#[test] +fn rpc_eth_hashrate() { + let tester = EthTester::default(); + tester.hashrates.write().unwrap().insert(H256::from(0), U256::from(0xfffa)); + tester.hashrates.write().unwrap().insert(H256::from(0), U256::from(0xfffb)); + tester.hashrates.write().unwrap().insert(H256::from(1), U256::from(0x1)); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_hashrate", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0xfffc","id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_submit_hashrate() { + let tester = EthTester::default(); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_submitHashrate", + "params": [ + "0x0000000000000000000000000000000000000000000000000000000000500000", + "0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); + assert_eq!(tester.hashrates.read().unwrap().get(&H256::from("0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c")).cloned(), + Some(U256::from(0x500_000))); +} + +#[test] +#[ignore] +fn rpc_eth_author() { + unimplemented!() +} + +#[test] +fn rpc_eth_mining() { + let tester = EthTester::default(); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); + + 
tester.hashrates.write().unwrap().insert(H256::from(1), U256::from(0x1)); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_gas_price() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_gasPrice", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0ba43b7400","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_accounts() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_block_number() { + let tester = EthTester::default(); + tester.client.add_blocks(10, EachBlockWith::Nothing); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0a","id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_balance() { + let tester = EthTester::default(); + tester.client.set_balance(Address::from(1), U256::from(5)); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBalance", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x05","id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_storage_at() { + let tester = EthTester::default(); + tester.client.set_storage(Address::from(1), H256::from(4), H256::from(7)); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getStorageAt", + "params": ["0x0000000000000000000000000000000000000001", "0x4", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x07","id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_transaction_count() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getTransactionCount", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_block_transaction_count_by_hash() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBlockTransactionCountByHash", + "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_transaction_count_by_number() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBlockTransactionCountByNumber", + "params": ["latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_uncle_count_by_block_hash() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getUncleCountByBlockHash", + "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], + 
"id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_uncle_count_by_block_number() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getUncleCountByBlockNumber", + "params": ["latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_code() { + let tester = EthTester::default(); + tester.client.set_code(Address::from(1), vec![0xff, 0x21]); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getCode", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0xff21","id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +#[ignore] +fn rpc_eth_call() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_send_transaction() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_send_raw_transaction() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_sign() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_estimate_gas() { + unimplemented!() +} + +#[test] +fn rpc_eth_compilers() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_getCompilers", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_compile_lll() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileLLL", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_compile_solidity() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSolidity", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_compile_serpent() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSerpent", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + + + diff --git a/rpc/src/v1/tests/helpers/account_provider.rs b/rpc/src/v1/tests/helpers/account_provider.rs new file mode 100644 index 000000000..ce5b76b44 --- /dev/null +++ b/rpc/src/v1/tests/helpers/account_provider.rs @@ -0,0 +1,90 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::RwLock; +use std::collections::HashMap; +use std::io; +use util::hash::{Address, H256}; +use util::crypto::{Secret, Signature}; +use util::keys::store::{AccountProvider, SigningError, EncryptedHashMapError}; + +/// Account mock. +#[derive(Clone)] +pub struct TestAccount { + /// True if account is unlocked. + pub unlocked: bool, + /// Account's password. + pub password: String, +} + +impl TestAccount { + pub fn new(password: &str) -> Self { + TestAccount { + unlocked: false, + password: password.to_owned(), + } + } +} + +/// Test account provider. +pub struct TestAccountProvider { + accounts: RwLock>, + pub adds: RwLock>, +} + +impl TestAccountProvider { + /// Basic constructor. + pub fn new(accounts: HashMap) -> Self { + TestAccountProvider { + accounts: RwLock::new(accounts), + adds: RwLock::new(vec![]), + } + } +} + +impl AccountProvider for TestAccountProvider { + fn accounts(&self) -> Result, io::Error> { + Ok(self.accounts.read().unwrap().keys().cloned().collect()) + } + + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + match self.accounts.write().unwrap().get_mut(account) { + Some(ref mut acc) if acc.password == pass => { + acc.unlocked = true; + Ok(()) + }, + Some(_) => Err(EncryptedHashMapError::InvalidPassword), + None => Err(EncryptedHashMapError::UnknownIdentifier), + } + } + + fn new_account(&self, pass: &str) -> Result { + let mut adds = self.adds.write().unwrap(); + let address = Address::from(adds.len() as u64 + 2); + adds.push(pass.to_owned()); + Ok(address) + } + + fn account_secret(&self, _account: &Address) -> Result { + unimplemented!() + } + + fn sign(&self, _account: &Address, _message: &H256) -> Result { + unimplemented!() + } + +} + diff --git a/rpc/src/v1/tests/helpers/external_miner.rs b/rpc/src/v1/tests/helpers/external_miner.rs new file mode 100644 index 000000000..a5111b302 --- /dev/null +++ b/rpc/src/v1/tests/helpers/external_miner.rs @@ -0,0 +1,48 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
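The `TestExternalMiner` defined just below treats a shared map as the entire mining state: submissions are keyed by miner id (resubmitting overwrites rather than accumulates), the reported hashrate is the sum over all entries, and `is_mining` is simply non-emptiness. A sketch of that bookkeeping with plain `u64` standing in for `U256`/`H256`, reproducing the arithmetic the `rpc_eth_hashrate` test asserts:

```rust
use std::collections::HashMap;

fn main() {
    let mut hashrates: HashMap<u64, u64> = HashMap::new();

    hashrates.insert(0, 0xfffa);
    hashrates.insert(0, 0xfffb); // same id: the earlier value is replaced
    hashrates.insert(1, 0x1);

    // hashrate() folds the map into a total.
    let total: u64 = hashrates.values().sum();
    assert_eq!(total, 0xfffc); // 0xfffb + 0x1, the "0xfffc" expected above

    // is_mining() is just "has anyone reported a hashrate".
    assert!(!hashrates.is_empty());
}
```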
+ +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; +use util::numbers::U256; +use util::hash::H256; +use v1::helpers::ExternalMinerService; + +/// Test ExternalMinerService; +pub struct TestExternalMiner { + pub hashrates: Arc>> +} + +impl TestExternalMiner { + pub fn new(hashrates: Arc>>) -> Self { + TestExternalMiner { + hashrates: hashrates, + } + } +} + +impl ExternalMinerService for TestExternalMiner { + fn submit_hashrate(&self, hashrate: U256, id: H256) { + self.hashrates.write().unwrap().insert(id, hashrate); + } + + fn hashrate(&self) -> U256 { + self.hashrates.read().unwrap().iter().fold(U256::from(0), |sum, (_, v)| sum + *v) + } + + fn is_mining(&self) -> bool { + !self.hashrates.read().unwrap().is_empty() + } +} diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs new file mode 100644 index 000000000..0cddf2a1e --- /dev/null +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -0,0 +1,53 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use util::{Address, H256, U256, Bytes}; +use util::standard::*; +use ethcore::error::Error; +use ethcore::client::BlockChainClient; +use ethcore::block::ClosedBlock; +use ethcore::transaction::SignedTransaction; +use ethminer::{MinerService, MinerStatus}; + +pub struct TestMinerService; + +impl MinerService for TestMinerService { + + /// Returns miner's status. + fn status(&self) -> MinerStatus { unimplemented!(); } + + /// Imports transactions to transaction queue. + fn import_transactions(&self, _transactions: Vec, _fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { unimplemented!(); } + + /// Returns hashes of transactions currently in pending + fn pending_transactions_hashes(&self) -> Vec { unimplemented!(); } + + /// Removes all transactions from the queue and restart mining operation. + fn clear_and_reset(&self, _chain: &BlockChainClient) { unimplemented!(); } + + /// Called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) { unimplemented!(); } + + /// New chain head event. Restart mining operation. + fn prepare_sealing(&self, _chain: &BlockChainClient) { unimplemented!(); } + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self, _chain: &BlockChainClient) -> &Mutex> { unimplemented!(); } + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. 
+ fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { unimplemented!(); } +} \ No newline at end of file diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs new file mode 100644 index 000000000..fc652e7d6 --- /dev/null +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +mod account_provider; +mod sync_provider; +mod miner_service; +mod external_miner; + +pub use self::account_provider::{TestAccount, TestAccountProvider}; +pub use self::sync_provider::{Config, TestSyncProvider}; +pub use self::miner_service::{TestMinerService}; +pub use self::external_miner::TestExternalMiner; diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs new file mode 100644 index 000000000..631752dfc --- /dev/null +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -0,0 +1,52 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use ethsync::{SyncProvider, SyncStatus, SyncState}; + +pub struct Config { + pub protocol_version: u8, + pub num_peers: usize, +} + +pub struct TestSyncProvider { + status: SyncStatus, +} + +impl TestSyncProvider { + pub fn new(config: Config) -> Self { + TestSyncProvider { + status: SyncStatus { + state: SyncState::NotSynced, + protocol_version: config.protocol_version, + start_block_number: 0, + last_imported_block_number: None, + highest_block_number: None, + blocks_total: 0, + blocks_received: 0, + num_peers: config.num_peers, + num_active_peers: 0, + mem_used: 0, + }, + } + } +} + +impl SyncProvider for TestSyncProvider { + fn status(&self) -> SyncStatus { + self.status.clone() + } +} + diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index bdf4567b6..21085a0fd 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -1 +1,23 @@ -//TODO: load custom blockchain state and test +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//!TODO: load custom blockchain state and test + +mod eth; +mod net; +mod web3; +mod helpers; +mod personal; diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs new file mode 100644 index 000000000..e24045ca6 --- /dev/null +++ b/rpc/src/v1/tests/net.rs @@ -0,0 +1,66 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use v1::{Net, NetClient}; +use v1::tests::helpers::{Config, TestSyncProvider}; + +fn sync_provider() -> Arc { + Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })) +} + +#[test] +fn rpc_net_version() { + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_net_peer_count() { + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_net_listening() { + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/personal.rs b/rpc/src/v1/tests/personal.rs new file mode 100644 index 000000000..261527c47 --- /dev/null +++ b/rpc/src/v1/tests/personal.rs @@ -0,0 +1,59 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use v1::tests::helpers::{TestAccount, TestAccountProvider}; +use v1::{PersonalClient, Personal}; +use util::numbers::*; +use std::collections::*; + +fn accounts_provider() -> Arc { + let mut accounts = HashMap::new(); + accounts.insert(Address::from(1), TestAccount::new("test")); + let ap = TestAccountProvider::new(accounts); + Arc::new(ap) +} + +fn setup() -> (Arc, IoHandler) { + let test_provider = accounts_provider(); + let personal = PersonalClient::new(&test_provider); + let io = IoHandler::new(); + io.add_delegate(personal.to_delegate()); + (test_provider, io) +} + +#[test] +fn accounts() { + let (_test_provider, io) = setup(); + + let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + + +#[test] +fn new_account() { + let (_test_provider, io) = setup(); + + let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000002","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + diff --git a/rpc/src/v1/tests/web3.rs b/rpc/src/v1/tests/web3.rs new file mode 100644 index 000000000..c717d361a --- /dev/null +++ b/rpc/src/v1/tests/web3.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use jsonrpc_core::IoHandler; +use util::version; +use v1::{Web3, Web3Client}; + +#[test] +fn rpc_web3_version() { + let web3 = Web3Client::new().to_delegate(); + let io = IoHandler::new(); + io.add_delegate(web3); + + let v = version().to_owned().replace("Parity/", "Parity//"); + + let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref()); + + assert_eq!(io.handle_request(request), Some(response)); +} diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index d2aeb0f9e..8a48e0dfe 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -55,22 +55,34 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Returns block with given number. fn block_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } - + /// Returns the number of transactions sent from given address at given time (block number). fn transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns the number of transactions in a block. - fn block_transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Returns the number of transactions in a block with given hash. 
+ fn block_transaction_count_by_hash(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns the number of uncles in a given block. - fn block_uncles_count(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Returns the number of transactions in a block with given block number. + fn block_transaction_count_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the number of uncles in a block with given hash. + fn block_uncles_count_by_hash(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the number of uncles in a block with given block number. + fn block_uncles_count_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns the code at given address at given time (block number). fn code_at(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Signs the data with given address signature. + fn sign(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Sends transaction. fn send_transaction(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Sends signed transaction. + fn send_raw_transaction(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Call contract. fn call(&self, _: Params) -> Result { rpc_unimplemented!() } @@ -90,7 +102,10 @@ pub trait Eth: Sized + Send + Sync + 'static { fn transaction_receipt(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns an uncles at given block and index. - fn uncle_at(&self, _: Params) -> Result { rpc_unimplemented!() } + fn uncle_by_block_hash_and_index(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns an uncles at given block and index. + fn uncle_by_block_number_and_index(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns available compilers. fn compilers(&self, _: Params) -> Result { rpc_unimplemented!() } @@ -127,15 +142,17 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_gasPrice", Eth::gas_price); delegate.add_method("eth_accounts", Eth::accounts); delegate.add_method("eth_blockNumber", Eth::block_number); - delegate.add_method("eth_balance", Eth::balance); + delegate.add_method("eth_getBalance", Eth::balance); delegate.add_method("eth_getStorageAt", Eth::storage_at); delegate.add_method("eth_getTransactionCount", Eth::transaction_count); - delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); - delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); - delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); - delegate.add_method("eth_code", Eth::code_at); + delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); + delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); + delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); + delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); + delegate.add_method("eth_getCode", Eth::code_at); + delegate.add_method("eth_sign", Eth::sign); delegate.add_method("eth_sendTransaction", Eth::send_transaction); + delegate.add_method("eth_sendRawTransaction", Eth::send_raw_transaction); delegate.add_method("eth_call", Eth::call); delegate.add_method("eth_estimateGas", Eth::estimate_gas); delegate.add_method("eth_getBlockByHash", Eth::block_by_hash); @@ -144,8 +161,8 @@ pub trait Eth: Sized + Send + Sync + 'static { 
delegate.add_method("eth_getTransactionByBlockHashAndIndex", Eth::transaction_by_block_hash_and_index); delegate.add_method("eth_getTransactionByBlockNumberAndIndex", Eth::transaction_by_block_number_and_index); delegate.add_method("eth_getTransactionReceipt", Eth::transaction_receipt); - delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_at); - delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_at); + delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_by_block_hash_and_index); + delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_by_block_number_and_index); delegate.add_method("eth_getCompilers", Eth::compilers); delegate.add_method("eth_compileLLL", Eth::compile_lll); delegate.add_method("eth_compileSolidity", Eth::compile_solidity); diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index f09f24e4d..0b14c30e8 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -15,10 +15,12 @@ // along with Parity. If not, see . use rustc_serialize::hex::ToHex; -use serde::{Serialize, Serializer}; +use serde::{Serialize, Serializer, Deserialize, Deserializer, Error}; +use serde::de::Visitor; +use util::common::FromHex; /// Wrapper structure around vector of bytes. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct Bytes(Vec); impl Bytes { @@ -26,6 +28,7 @@ impl Bytes { pub fn new(bytes: Vec) -> Bytes { Bytes(bytes) } + pub fn to_vec(self) -> Vec { let Bytes(x) = self; x } } impl Default for Bytes { @@ -36,7 +39,7 @@ impl Default for Bytes { } impl Serialize for Bytes { - fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer { let mut serialized = "0x".to_owned(); serialized.push_str(self.0.to_hex().as_ref()); @@ -44,6 +47,32 @@ impl Serialize for Bytes { } } +impl Deserialize for Bytes { + fn deserialize(deserializer: &mut D) -> Result + where D: Deserializer { + deserializer.deserialize(BytesVisitor) + } +} + +struct BytesVisitor; + +impl Visitor for BytesVisitor { + type Value = Bytes; + + fn visit_str(&mut self, value: &str) -> Result where E: Error { + if value.len() >= 2 && &value[0..2] == "0x" { + Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![]))) + } else { + Err(Error::custom("invalid hex")) + } + } + + fn visit_string(&mut self, value: String) -> Result where E: Error { + self.visit_str(value.as_ref()) + } +} + + #[cfg(test)] mod tests { use super::*; diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 34c1f1cff..ebc3bc0ff 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -23,6 +23,7 @@ mod log; mod optionals; mod sync; mod transaction; +mod transaction_request; pub use self::block::{Block, BlockTransactions}; pub use self::block_number::BlockNumber; @@ -33,3 +34,5 @@ pub use self::log::Log; pub use self::optionals::OptionalValue; pub use self::sync::{SyncStatus, SyncInfo}; pub use self::transaction::Transaction; +pub use self::transaction_request::TransactionRequest; + diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs new file mode 100644 index 000000000..d40402ab5 --- /dev/null +++ b/rpc/src/v1/types/transaction_request.rs @@ -0,0 +1,139 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see .
+
+use util::hash::Address;
+use util::numbers::{Uint, U256};
+use ethcore::transaction::{Action, Transaction};
+use v1::types::Bytes;
+
+#[derive(Debug, Default, PartialEq, Deserialize)]
+pub struct TransactionRequest {
+	pub from: Address,
+	pub to: Option<Address>
, + #[serde(rename="gasPrice")] + pub gas_price: Option, + pub gas: Option, + pub value: Option, + pub data: Option, + pub nonce: Option, +} + +impl Into for TransactionRequest { + fn into(self) -> Transaction { + Transaction { + nonce: self.nonce.unwrap_or_else(U256::zero), + action: self.to.map_or(Action::Create, Action::Call), + gas: self.gas.unwrap_or_else(U256::zero), + gas_price: self.gas_price.unwrap_or_else(U256::zero), + value: self.value.unwrap_or_else(U256::zero), + data: self.data.map_or_else(Vec::new, |d| d.to_vec()), + } + } +} + +#[cfg(test)] +mod tests { + use serde_json; + use util::numbers::{Uint, U256}; + use util::hash::Address; + use ethcore::transaction::{Transaction, Action}; + use v1::types::Bytes; + use super::*; + + #[test] + fn transaction_request_into_transaction() { + let tr = TransactionRequest { + from: Address::default(), + to: Some(Address::from(10)), + gas_price: Some(U256::from(20)), + gas: Some(U256::from(10_000)), + value: Some(U256::from(1)), + data: Some(Bytes::new(vec![10, 20])), + nonce: Some(U256::from(12)), + }; + + assert_eq!(Transaction { + nonce: U256::from(12), + action: Action::Call(Address::from(10)), + gas: U256::from(10_000), + gas_price: U256::from(20), + value: U256::from(1), + data: vec![10, 20], + }, tr.into()); + } + + #[test] + fn empty_transaction_request_into_transaction() { + let tr = TransactionRequest { + from: Address::default(), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + }; + + assert_eq!(Transaction { + nonce: U256::zero(), + action: Action::Create, + gas: U256::zero(), + gas_price: U256::zero(), + value: U256::zero(), + data: vec![], + }, tr.into()); + } + + #[test] + fn transaction_request_deserialize() { + let s = r#"{ + "from":"0x0000000000000000000000000000000000000001", + "to":"0x0000000000000000000000000000000000000002", + "gasPrice":"0x1", + "gas":"0x2", + "value":"0x3", + "data":"0x123456", + "nonce":"0x4" + }"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, TransactionRequest { + from: Address::from(1), + to: Some(Address::from(2)), + gas_price: Some(U256::from(1)), + gas: Some(U256::from(2)), + value: Some(U256::from(3)), + data: Some(Bytes::new(vec![0x12, 0x34, 0x56])), + nonce: Some(U256::from(4)), + }); + } + + #[test] + fn transaction_request_deserialize_empty() { + let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, TransactionRequest { + from: Address::from(1), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + }); + } +} diff --git a/sync/Cargo.toml b/sync/Cargo.toml index f10a772e3..877f4e6c8 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Ethcore blockchain sync" name = "ethsync" -version = "0.9.99" +version = "1.1.0" license = "GPL-3.0" authors = ["Ethcore , } type RlpResponseResult = Result, PacketDecodeError>; impl ChainSync { /// Create a new instance of syncing strategy. 
- pub fn new(config: SyncConfig) -> ChainSync { + pub fn new(config: SyncConfig, miner: Arc) -> ChainSync { ChainSync { state: SyncState::NotSynced, starting_block: 0, @@ -231,9 +237,10 @@ impl ChainSync { last_imported_hash: None, syncing_difficulty: U256::from(0u64), have_common_block: false, - last_send_block_number: 0, + last_sent_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, + miner: miner, } } @@ -287,8 +294,6 @@ impl ChainSync { /// Restart sync pub fn restart(&mut self, io: &mut SyncIo) { self.reset(); - self.last_imported_block = None; - self.last_imported_hash = None; self.starting_block = 0; self.highest_block = None; self.have_common_block = false; @@ -354,7 +359,7 @@ impl ChainSync { for i in 0..item_count { let info: BlockHeader = try!(r.val_at(i)); let number = BlockNumber::from(info.number); - if number <= self.current_base_block() || self.headers.have_item(&number) { + if (number <= self.current_base_block() && self.have_common_block) || self.headers.have_item(&number) { trace!(target: "sync", "Skipping existing block header"); continue; } @@ -364,11 +369,17 @@ impl ChainSync { } let hash = info.hash(); match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { - self.have_common_block = true; - self.last_imported_block = Some(number); - self.last_imported_hash = Some(hash.clone()); - trace!(target: "sync", "Found common header {} ({})", number, hash); + BlockStatus::InChain | BlockStatus::Queued => { + if !self.have_common_block || self.current_base_block() < number { + self.last_imported_block = Some(number); + self.last_imported_hash = Some(hash.clone()); + } + if !self.have_common_block { + self.have_common_block = true; + trace!(target: "sync", "Found common header {} ({})", number, hash); + } else { + trace!(target: "sync", "Header already in chain {} ({})", number, hash); + } }, _ => { if self.have_common_block { @@ -462,6 +473,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); @@ -484,7 +496,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } @@ -575,7 +587,7 @@ impl ChainSync { pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}", peer); if let Err(e) = self.send_status(io) { - warn!(target:"sync", "Error sending status request: {:?}", e); + debug!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } } @@ -643,10 +655,7 @@ impl ChainSync { let mut needed_numbers: Vec = Vec::new(); if self.have_common_block && !self.headers.is_empty() && self.headers.range_iter().next().unwrap().0 == self.current_base_block() + 1 { - for (start, ref items) in self.headers.range_iter() { - if needed_bodies.len() >= MAX_BODIES_TO_REQUEST { - break; - } + if let Some((start, ref items)) = self.headers.range_iter().next() { let mut index: BlockNumber = 0; while index != items.len() as BlockNumber && needed_bodies.len() < MAX_BODIES_TO_REQUEST { let block = start + index; @@ -690,7 
+699,10 @@ impl ChainSync { if !self.have_common_block { // download backwards until common block is found 1 header at a time let chain_info = io.chain().chain_info(); - start = chain_info.best_block_number; + start = match self.last_imported_block { + Some(n) => n, + None => chain_info.best_block_number, + }; if !self.headers.is_empty() { start = min(start, self.headers.range_iter().next().unwrap().0 - 1); } @@ -831,20 +843,14 @@ impl ChainSync { /// Remove downloaded bocks/headers starting from specified number. /// Used to recover from an error and re-download parts of the chain detected as bad. fn remove_downloaded_blocks(&mut self, start: BlockNumber) { - for n in self.headers.get_tail(&start) { - if let Some(ref header_data) = self.headers.find_item(&n) { - let header_to_delete = HeaderView::new(&header_data.data); - let header_id = HeaderId { - transactions_root: header_to_delete.transactions_root(), - uncles: header_to_delete.uncles_hash() - }; - self.header_ids.remove(&header_id); - } - self.downloading_bodies.remove(&n); - self.downloading_headers.remove(&n); - } - self.headers.remove_tail(&start); - self.bodies.remove_tail(&start); + let ids = self.header_ids.drain().filter(|&(_, v)| v < start).collect(); + self.header_ids = ids; + let hdrs = self.downloading_headers.drain().filter(|v| *v < start).collect(); + self.downloading_headers = hdrs; + let bodies = self.downloading_bodies.drain().filter(|v| *v < start).collect(); + self.downloading_bodies = bodies; + self.headers.remove_from(&start); + self.bodies.remove_from(&start); } /// Request headers from a peer by block hash @@ -919,8 +925,19 @@ impl ChainSync { } } /// Called when peer sends us new transactions - fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - Ok(()) + fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + let item_count = r.item_count(); + trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); + + let mut transactions = Vec::with_capacity(item_count); + for i in 0..item_count { + let tx: SignedTransaction = try!(r.val_at(i)); + transactions.push(tx); + } + let chain = io.chain(); + let fetch_nonce = |a: &Address| chain.nonce(a); + let _ = self.miner.import_transactions(transactions, fetch_nonce); + Ok(()) } /// Send Status message @@ -1037,17 +1054,20 @@ impl ChainSync { debug!(target: "sync", "Empty GetReceipts request, ignoring."); return Ok(None); } - count = min(count, MAX_RECEIPTS_TO_SEND); - let mut added = 0usize; + count = min(count, MAX_RECEIPTS_HEADERS_TO_SEND); + let mut added_headers = 0usize; + let mut added_receipts = 0usize; let mut data = Bytes::new(); for i in 0..count { - if let Some(mut hdr) = io.chain().block_receipts(&try!(rlp.val_at::(i))) { - data.append(&mut hdr); - added += 1; + if let Some(mut receipts_bytes) = io.chain().block_receipts(&try!(rlp.val_at::(i))) { + data.append(&mut receipts_bytes); + added_receipts += receipts_bytes.len(); + added_headers += 1; + if added_receipts > MAX_RECEIPTS_TO_SEND { break; } } } - let mut rlp_result = RlpStream::new_list(added); - rlp_result.append_raw(&data, added); + let mut rlp_result = RlpStream::new_list(added_headers); + rlp_result.append_raw(&data, added_headers); Ok(Some((RECEIPTS_PACKET, rlp_result))) } @@ -1072,7 +1092,7 @@ impl ChainSync { let rlp = UntrustedRlp::new(data); if packet_id != STATUS_PACKET && !self.peers.contains_key(&peer) { - warn!(target:"sync", 
"Unexpected packet from unregistered peer: {}:{}", peer, io.peer_info(peer)); + debug!(target:"sync", "Unexpected packet from unregistered peer: {}:{}", peer, io.peer_info(peer)); return; } let result = match packet_id { @@ -1229,22 +1249,34 @@ impl ChainSync { sent } + fn propagate_latest_blocks(&mut self, io: &mut SyncIo) { + let chain_info = io.chain().chain_info(); + if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { + let blocks = self.propagate_blocks(&chain_info, io); + let hashes = self.propagate_new_hashes(&chain_info, io); + if blocks != 0 || hashes != 0 { + trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); + } + } + self.last_sent_block_number = chain_info.best_block_number; + } + /// Maintain other peers. Send out any new blocks and transactions pub fn maintain_sync(&mut self, io: &mut SyncIo) { self.check_resume(io); } - /// should be called once chain has new block, triggers the latest block propagation - pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) { - let chain = io.chain().chain_info(); - if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { - let blocks = self.propagate_blocks(&chain, io); - let hashes = self.propagate_new_hashes(&chain, io); - if blocks != 0 || hashes != 0 { - trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); - } - } - self.last_send_block_number = chain.best_block_number; + /// called when block is imported to chain, updates transactions queue and propagates the blocks + pub fn chain_new_blocks(&mut self, io: &mut SyncIo, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) { + // Notify miner + self.miner.chain_new_blocks(io.chain(), imported, invalid, enacted, retracted); + // Propagate latests blocks + self.propagate_latest_blocks(io); + // TODO [todr] propagate transactions? 
+ } + + pub fn chain_new_head(&mut self, io: &mut SyncIo) { + self.miner.prepare_sealing(io.chain()); } } @@ -1257,6 +1289,7 @@ mod tests { use super::{PeerInfo, PeerAsking}; use ethcore::header::*; use ethcore::client::*; + use ethminer::{Miner, MinerService}; fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); @@ -1329,7 +1362,7 @@ mod tests { assert!(rlp_result.is_some()); // the length of two rlp-encoded receipts - assert_eq!(597, rlp_result.unwrap().1.out().len()); + assert_eq!(603, rlp_result.unwrap().1.out().len()); let mut sync = dummy_sync_with_peer(H256::new()); io.sender = Some(2usize); @@ -1366,7 +1399,7 @@ mod tests { } fn dummy_sync_with_peer(peer_latest_hash: H256) -> ChainSync { - let mut sync = ChainSync::new(SyncConfig::default()); + let mut sync = ChainSync::new(SyncConfig::default(), Miner::new()); sync.peers.insert(0, PeerInfo { protocol_version: 0, @@ -1386,7 +1419,7 @@ mod tests { #[test] fn finds_lagging_peers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10)); let chain_info = client.chain_info(); @@ -1400,7 +1433,7 @@ mod tests { #[test] fn calculates_tree_for_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(15, false); + client.add_blocks(15, EachBlockWith::Uncle); let start = client.block_hash_delta_minus(4); let end = client.block_hash_delta_minus(2); @@ -1417,7 +1450,7 @@ mod tests { #[test] fn sends_new_hashes_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1436,7 +1469,7 @@ mod tests { #[test] fn sends_latest_block_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1454,7 +1487,7 @@ mod tests { #[test] fn handles_peer_new_block_mallformed() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let block_data = get_dummy_block(11, client.chain_info().best_block_hash); @@ -1472,7 +1505,7 @@ mod tests { #[test] fn handles_peer_new_block() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); @@ -1490,7 +1523,7 @@ mod tests { #[test] fn handles_peer_new_block_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1506,7 +1539,7 @@ mod tests { #[test] fn handles_peer_new_hashes() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1522,7 
+1555,7 @@ mod tests { #[test] fn handles_peer_new_hashes_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1540,7 +1573,7 @@ mod tests { #[test] fn hashes_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1558,7 +1591,7 @@ mod tests { #[test] fn block_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1571,10 +1604,37 @@ mod tests { assert!(result.is_ok()); } + #[test] + fn should_add_transactions_to_queue() { + // given + let mut client = TestBlockChainClient::new(); + client.add_blocks(98, EachBlockWith::Uncle); + client.add_blocks(1, EachBlockWith::UncleAndTransaction); + client.add_blocks(1, EachBlockWith::Transaction); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); + + let good_blocks = vec![client.block_hash_delta_minus(2)]; + let retracted_blocks = vec![client.block_hash_delta_minus(1)]; + + let mut queue = VecDeque::new(); + let mut io = TestIo::new(&mut client, &mut queue, None); + + // when + sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks); + assert_eq!(sync.miner.status().transaction_queue_future, 0); + assert_eq!(sync.miner.status().transaction_queue_pending, 1); + sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks); + + // then + let status = sync.miner.status(); + assert_eq!(status.transaction_queue_pending, 1); + assert_eq!(status.transaction_queue_future, 0); + } + #[test] fn returns_requested_block_headers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); @@ -1598,7 +1658,7 @@ mod tests { #[test] fn returns_requested_block_headers_reverse() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..1c87da2de 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -17,9 +17,10 @@ #![warn(missing_docs)] #![cfg_attr(feature="dev", feature(plugin))] #![cfg_attr(feature="dev", plugin(clippy))] - // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(feature="dev", allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(feature="dev", allow(if_not_else))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: @@ -31,18 +32,21 @@ //! extern crate ethcore_util as util; //! extern crate ethcore; //! extern crate ethsync; +//! extern crate ethminer; //! use std::env; //! use std::sync::Arc; //! use util::network::{NetworkService, NetworkConfiguration}; //! 
use ethcore::client::{Client, ClientConfig}; //! use ethsync::{EthSync, SyncConfig}; +//! use ethminer::Miner; //! use ethcore::ethereum; //! //! fn main() { //! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); //! let dir = env::temp_dir(); //! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); -//! EthSync::register(&mut service, SyncConfig::default(), client); +//! let miner = Miner::new(); +//! EthSync::register(&mut service, SyncConfig::default(), client, miner); //! } //! ``` @@ -51,6 +55,7 @@ extern crate log; #[macro_use] extern crate ethcore_util as util; extern crate ethcore; +extern crate ethminer; extern crate env_logger; extern crate time; extern crate rand; @@ -59,19 +64,18 @@ extern crate heapsize; use std::ops::*; use std::sync::*; -use ethcore::client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use util::TimerToken; use util::{U256, ONE_U256}; -use chain::ChainSync; use ethcore::client::Client; use ethcore::service::SyncMessage; +use ethminer::Miner; use io::NetSyncIo; +use chain::ChainSync; mod chain; mod io; mod range_collection; -// TODO [todr] Made public to suppress dead code warnings -pub mod transaction_queue; #[cfg(test)] mod tests; @@ -93,6 +97,12 @@ impl Default for SyncConfig { } } +/// Provides the current sync status +pub trait SyncProvider: Send + Sync { + /// Get sync status + fn status(&self) -> SyncStatus; +} + /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. TODO: this should eventually become an IPC endpoint @@ -105,20 +115,15 @@ pub use self::chain::{SyncStatus, SyncState}; impl EthSync { /// Creates and registers the protocol with the network service - pub fn register(service: &mut NetworkService<SyncMessage>, config: SyncConfig, chain: Arc<Client>) -> Arc<EthSync> { + pub fn register(service: &mut NetworkService<SyncMessage>, config: SyncConfig, chain: Arc<Client>, miner: Arc<Miner>) -> Arc<EthSync> { let sync = Arc::new(EthSync { chain: chain, - sync: RwLock::new(ChainSync::new(config)), + sync: RwLock::new(ChainSync::new(config, miner)), }); service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); sync } - /// Get sync status - pub fn status(&self) -> SyncStatus { - self.sync.read().unwrap().status() - } - /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext<SyncMessage>) { self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref())); @@ -130,6 +135,13 @@ impl EthSync { } } +impl SyncProvider for EthSync { + /// Get sync status + fn status(&self) -> SyncStatus { + self.sync.read().unwrap().status() + } +} + impl NetworkProtocolHandler<SyncMessage> for EthSync { fn initialize(&self, io: &NetworkContext<SyncMessage>) { io.register_timer(0, 1000).expect("Error registering sync timer"); } @@ -153,8 +165,16 @@ impl NetworkProtocolHandler<SyncMessage> for EthSync { } fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) { - if let SyncMessage::BlockVerified = *message { - self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); + match *message { + SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => { + let mut sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted); + }, + SyncMessage::NewChainHead => { + let mut sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_head(&mut sync_io); + } + _ => {/* Ignore other messages 
*/}, } } } diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index dc2f4e446..0628df401 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -42,6 +42,8 @@ pub trait RangeCollection<K, V> { fn remove_head(&mut self, start: &K); /// Remove all elements >= `start` in the range that contains `start` fn remove_tail(&mut self, start: &K); + /// Remove all elements >= `start` + fn remove_from(&mut self, start: &K); /// Insert a new element into the collection fn insert_item(&mut self, key: K, value: V); /// Get an iterator over ranges @@ -137,6 +139,28 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq + } } + /// Remove the element and all following it. + fn remove_from(&mut self, key: &K) { + match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) { + Ok(index) => { self.drain(.. index + 1); }, + Err(index) => { + let mut empty = false; + match self.get_mut(index) { + Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => { + v.truncate((*key - *k).to_usize()); + empty = v.is_empty(); + } + _ => {} + } + if empty { + self.drain(.. index + 1); + } else { + self.drain(.. index); + } + }, + } + } + /// Remove range elements up to key fn remove_head(&mut self, key: &K) { if *key == FromUsize::from_usize(0) { @@ -272,5 +296,22 @@ fn test_range() { assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); r.remove_tail(&2); assert_eq!(r.range_iter().next(), None); + + let mut r = ranges.clone(); + r.remove_from(&20); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal); + r.remove_from(&18); + assert!(!r.have_item(&18)); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal); + r.remove_from(&16); + assert!(!r.have_item(&16)); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal); + r.remove_from(&3); + assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); + r.remove_from(&1); + assert_eq!(r.range_iter().next(), None); + let mut r = ranges.clone(); + r.remove_from(&2); + assert_eq!(r.range_iter().next(), None); } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index b01c894a0..eebbdb164 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. 
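The `remove_from` added above leans on an invariant that is easy to miss: the collection keeps its ranges sorted by descending start key, which is why the binary search reverses the comparator before probing. A minimal standalone sketch of the same truncate-or-drain logic, specialised to `usize` keys and `char` values (the free function and concrete types are illustrative, not the crate's generic `FromUsize`/`ToUsize` machinery):

```rust
// Sketch of `remove_from` over a Vec of (start, elements) ranges kept in
// descending order of start key.
fn remove_from(ranges: &mut Vec<(usize, Vec<char>)>, key: usize) {
    // Descending order, so reverse the comparator for the binary search.
    match ranges.binary_search_by(|&(start, _)| start.cmp(&key).reverse()) {
        // `key` is exactly a range start: drop that range and every range
        // with a larger start (they all sit at indices <= `index`).
        Ok(index) => { ranges.drain(..index + 1); }
        Err(index) => {
            let mut empty = false;
            if let Some(entry) = ranges.get_mut(index) {
                let start = entry.0;
                if start <= key && start + entry.1.len() > key {
                    entry.1.truncate(key - start); // keep elements below `key`
                    empty = entry.1.is_empty();
                }
            }
            // Drop the truncated range too if nothing survived in it.
            let cut = if empty { index + 1 } else { index };
            ranges.drain(..cut);
        }
    }
}

fn main() {
    // Mirrors the new test vectors: 16..19 = p,q,r and 2..5 = b,c,d.
    let mut r = vec![(16, vec!['p', 'q', 'r']), (2, vec!['b', 'c', 'd'])];
    remove_from(&mut r, 18);
    assert_eq!(r, vec![(16, vec!['p', 'q']), (2, vec!['b', 'c', 'd'])]);
    remove_from(&mut r, 3);
    assert_eq!(r, vec![(2, vec!['b'])]);
}
```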
use util::*; -use ethcore::client::{BlockChainClient, BlockId}; +use ethcore::client::{BlockChainClient, BlockId, EachBlockWith}; use io::SyncIo; use chain::{SyncState}; use super::helpers::*; @@ -24,8 +24,8 @@ use super::helpers::*; fn two_peers() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); @@ -35,8 +35,8 @@ fn two_peers() { fn status_after_sync() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync(); let status = net.peer(0).sync.status(); assert_eq!(status.state, SyncState::Idle); @@ -45,8 +45,8 @@ fn status_after_sync() { #[test] fn takes_few_steps() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(100, false); - net.peer_mut(2).chain.add_blocks(100, false); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Uncle); let total_steps = net.sync(); assert!(total_steps < 7); } @@ -56,8 +56,9 @@ fn empty_blocks() { ::env_logger::init().ok(); let mut net = TestNet::new(3); for n in 0..200 { - net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); - net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); + let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle }; + net.peer_mut(1).chain.add_blocks(5, with.clone()); + net.peer_mut(2).chain.add_blocks(5, with); } net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); @@ -68,14 +69,14 @@ fn empty_blocks() { fn forked() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(0).chain.add_blocks(300, false); - net.peer_mut(1).chain.add_blocks(300, false); - net.peer_mut(2).chain.add_blocks(300, false); - net.peer_mut(0).chain.add_blocks(100, true); //fork - net.peer_mut(1).chain.add_blocks(200, false); - net.peer_mut(2).chain.add_blocks(200, false); - net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 - net.peer_mut(2).chain.add_blocks(10, true); + net.peer_mut(0).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Nothing); //fork + net.peer_mut(1).chain.add_blocks(200, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); //fork between 1 and 2 + net.peer_mut(2).chain.add_blocks(10, EachBlockWith::Nothing); // peer 1 has the best chain of 601 blocks let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); net.sync(); @@ -87,8 +88,8 @@ fn forked() { #[test] fn restart() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync_steps(8); @@ -109,8 
+110,8 @@ fn status_empty() { #[test] fn status_packet() { let mut net = TestNet::new(2); - net.peer_mut(0).chain.add_blocks(100, false); - net.peer_mut(1).chain.add_blocks(1, false); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(1, EachBlockWith::Uncle); net.start(); @@ -123,13 +124,13 @@ fn status_packet() { #[test] fn propagate_hashes() { let mut net = TestNet::new(6); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.peer_mut(0).chain.add_blocks(10, false); + net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.trigger_block_verified(0); //first event just sets the marker - net.trigger_block_verified(0); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); // 5 peers to sync assert_eq!(5, net.peer(0).queue.len()); @@ -149,12 +150,12 @@ fn propagate_hashes() { #[test] fn propagate_blocks() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.peer_mut(0).chain.add_blocks(10, false); - net.trigger_block_verified(0); //first event just sets the marker - net.trigger_block_verified(0); + net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); assert!(!net.peer(0).queue.is_empty()); // NEW_BLOCK_PACKET @@ -164,7 +165,7 @@ fn propagate_blocks() { #[test] fn restart_on_malformed_block() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.peer_mut(1).chain.corrupt_block(6); net.sync_steps(10); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index e170a4a85..b3e62ccc6 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -15,265 +15,11 @@ // along with Parity. If not, see . 
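These tests no longer describe blocks with a boolean empty-flag; `add_blocks` now takes an `EachBlockWith` value imported from `ethcore::client`, and the hand-written `TestBlockChainClient` is deleted from this crate just below. Only the four variants exercised in this diff are certain; a hypothetical reconstruction, with the `Clone` derive implied by the `with.clone()` call in `empty_blocks`:

```rust
/// Hypothetical sketch of ethcore's test-client enum: the variant names are
/// grounded in the tests above, the derive and doc text are assumptions.
#[derive(Clone)]
pub enum EachBlockWith {
    /// Bare blocks; used above to build divergent (forked) chains.
    Nothing,
    /// Every generated block carries one uncle header.
    Uncle,
    /// Every generated block carries one transaction.
    Transaction,
    /// Every generated block carries both an uncle and a transaction.
    UncleAndTransaction,
}
```

This is what lets `should_add_transactions_to_queue` earlier in the diff build a chain whose final two blocks carry transactions for the miner's queue to pick up or retract.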
use util::*; -use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo}; -use ethcore::header::{Header as BlockHeader, BlockNumber}; -use ethcore::error::*; +use ethcore::client::{TestBlockChainClient, BlockChainClient}; use io::SyncIo; use chain::ChainSync; +use ethminer::Miner; use ::SyncConfig; -use ethcore::receipt::Receipt; -use ethcore::transaction::LocalizedTransaction; -use ethcore::filter::Filter; -use ethcore::log_entry::LocalizedLogEntry; - -pub struct TestBlockChainClient { - pub blocks: RwLock>, - pub numbers: RwLock>, - pub genesis_hash: H256, - pub last_hash: RwLock, - pub difficulty: RwLock, -} - -impl TestBlockChainClient { - pub fn new() -> TestBlockChainClient { - - let mut client = TestBlockChainClient { - blocks: RwLock::new(HashMap::new()), - numbers: RwLock::new(HashMap::new()), - genesis_hash: H256::new(), - last_hash: RwLock::new(H256::new()), - difficulty: RwLock::new(From::from(0)), - }; - client.add_blocks(1, true); // add genesis block - client.genesis_hash = client.last_hash.read().unwrap().clone(); - client - } - - pub fn add_blocks(&mut self, count: usize, empty: bool) { - let len = self.numbers.read().unwrap().len(); - for n in len..(len + count) { - let mut header = BlockHeader::new(); - header.difficulty = From::from(n); - header.parent_hash = self.last_hash.read().unwrap().clone(); - header.number = n as BlockNumber; - let mut uncles = RlpStream::new_list(if empty {0} else {1}); - if !empty { - let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); - uncle_header.number = n as BlockNumber; - uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); - } - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&rlp::NULL_RLP, 1); - rlp.append_raw(uncles.as_raw(), 1); - self.import_block(rlp.as_raw().to_vec()).unwrap(); - } - } - - pub fn corrupt_block(&mut self, n: BlockNumber) { - let hash = self.block_hash(BlockId::Number(n)).unwrap(); - let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); - header.parent_hash = H256::new(); - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&rlp::NULL_RLP, 1); - rlp.append_raw(&rlp::NULL_RLP, 1); - self.blocks.write().unwrap().insert(hash, rlp.out()); - } - - pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { - let blocks_read = self.numbers.read().unwrap(); - let index = blocks_read.len() - delta; - blocks_read[&index].clone() - } - - fn block_hash(&self, id: BlockId) -> Option { - match id { - BlockId::Hash(hash) => Some(hash), - BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(), - BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(), - BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned() - } - } -} - -impl BlockChainClient for TestBlockChainClient { - fn block_total_difficulty(&self, _id: BlockId) -> Option { - Some(U256::zero()) - } - - fn block_hash(&self, _id: BlockId) -> Option { - unimplemented!(); - } - - fn code(&self, _address: &Address) -> Option { - unimplemented!(); - } - - fn transaction(&self, _id: TransactionId) -> Option { - unimplemented!(); - } - - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { - unimplemented!(); - } - - fn logs(&self, _filter: Filter) -> Vec { - unimplemented!(); 
- } - - fn block_header(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) - } - - fn block_body(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| { - let mut stream = RlpStream::new_list(2); - stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); - stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); - stream.out() - })) - } - - fn block(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned()) - } - - fn block_status(&self, id: BlockId) -> BlockStatus { - match id { - BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain, - BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain, - _ => BlockStatus::Unknown - } - } - - // works only if blocks are one after another 1 -> 2 -> 3 - fn tree_route(&self, from: &H256, to: &H256) -> Option { - Some(TreeRoute { - ancestor: H256::new(), - index: 0, - blocks: { - let numbers_read = self.numbers.read().unwrap(); - let mut adding = false; - - let mut blocks = Vec::new(); - for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { - if hash == to { - if adding { - blocks.push(hash.clone()); - } - adding = false; - break; - } - if hash == from { - adding = true; - } - if adding { - blocks.push(hash.clone()); - } - } - if adding { Vec::new() } else { blocks } - } - }) - } - - // TODO: returns just hashes instead of node state rlp(?) - fn state_data(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let mut rlp = RlpStream::new(); - rlp.append(&hash.clone()); - return Some(rlp.out()); - } - None - } - - fn block_receipts(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let receipt = Receipt::new( - H256::zero(), - U256::zero(), - vec![]); - let mut rlp = RlpStream::new(); - rlp.append(&receipt); - return Some(rlp.out()); - } - None - } - - fn import_block(&self, b: Bytes) -> ImportResult { - let header = Rlp::new(&b).val_at::(0); - let h = header.hash(); - let number: usize = header.number as usize; - if number > self.blocks.read().unwrap().len() { - panic!("Unexpected block number. 
Expected {}, got {}", self.blocks.read().unwrap().len(), number); - } - if number > 0 { - match self.blocks.read().unwrap().get(&header.parent_hash) { - Some(parent) => { - let parent = Rlp::new(parent).val_at::(0); - if parent.number != (header.number - 1) { - panic!("Unexpected block parent"); - } - }, - None => { - panic!("Unknown block parent {:?} for block {}", header.parent_hash, number); - } - } - } - let len = self.numbers.read().unwrap().len(); - if number == len { - { - let mut difficulty = self.difficulty.write().unwrap(); - *difficulty.deref_mut() = *difficulty.deref() + header.difficulty; - } - mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone()); - self.blocks.write().unwrap().insert(h.clone(), b); - self.numbers.write().unwrap().insert(number, h.clone()); - let mut parent_hash = header.parent_hash; - if number > 0 { - let mut n = number - 1; - while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { - *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); - n -= 1; - parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; - } - } - } - else { - self.blocks.write().unwrap().insert(h.clone(), b.to_vec()); - } - Ok(h) - } - - fn queue_info(&self) -> BlockQueueInfo { - BlockQueueInfo { - verified_queue_size: 0, - unverified_queue_size: 0, - verifying_queue_size: 0, - max_queue_size: 0, - max_mem_use: 0, - mem_used: 0, - } - } - - fn clear_queue(&self) { - } - - fn chain_info(&self) -> BlockChainInfo { - BlockChainInfo { - total_difficulty: *self.difficulty.read().unwrap(), - pending_total_difficulty: *self.difficulty.read().unwrap(), - genesis_hash: self.genesis_hash.clone(), - best_block_hash: self.last_hash.read().unwrap().clone(), - best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, - } - } -} pub struct TestIo<'p> { pub chain: &'p mut TestBlockChainClient, @@ -347,7 +93,7 @@ impl TestNet { for _ in 0..n { net.peers.push(TestPeer { chain: TestBlockChainClient::new(), - sync: ChainSync::new(SyncConfig::default()), + sync: ChainSync::new(SyncConfig::default(), Miner::new()), queue: VecDeque::new(), }); } @@ -420,8 +166,8 @@ impl TestNet { self.peers.iter().all(|p| p.queue.is_empty()) } - pub fn trigger_block_verified(&mut self, peer_id: usize) { + pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[]); } } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs deleted file mode 100644 index 4f5622a2f..000000000 --- a/sync/src/transaction_queue.rs +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -// TODO [todr] - own transactions should have higher priority - -//! 
Transaction Queue - -use std::cmp::{Ordering}; -use std::collections::{HashMap, BTreeSet}; -use util::numbers::{Uint, U256}; -use util::hash::{Address, H256}; -use util::table::*; -use ethcore::transaction::*; - - -#[derive(Clone, Debug)] -struct TransactionOrder { - nonce_height: U256, - gas_price: U256, - hash: H256, -} - -impl TransactionOrder { - fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { - TransactionOrder { - nonce_height: tx.nonce() - base_nonce, - gas_price: tx.transaction.gas_price, - hash: tx.hash(), - } - } - - fn update_height(mut self, nonce: U256, base_nonce: U256) -> Self { - self.nonce_height = nonce - base_nonce; - self - } -} - -impl Eq for TransactionOrder {} -impl PartialEq for TransactionOrder { - fn eq(&self, other: &TransactionOrder) -> bool { - self.cmp(other) == Ordering::Equal - } -} -impl PartialOrd for TransactionOrder { - fn partial_cmp(&self, other: &TransactionOrder) -> Option { - Some(self.cmp(other)) - } -} -impl Ord for TransactionOrder { - fn cmp(&self, b: &TransactionOrder) -> Ordering { - // First check nonce_height - if self.nonce_height != b.nonce_height { - return self.nonce_height.cmp(&b.nonce_height); - } - - // Then compare gas_prices - let a_gas = self.gas_price; - let b_gas = b.gas_price; - if a_gas != b_gas { - return a_gas.cmp(&b_gas); - } - - // Compare hashes - self.hash.cmp(&b.hash) - } -} - -struct VerifiedTransaction { - transaction: SignedTransaction -} -impl VerifiedTransaction { - fn new(transaction: SignedTransaction) -> Self { - VerifiedTransaction { - transaction: transaction - } - } - - fn hash(&self) -> H256 { - self.transaction.hash() - } - - fn nonce(&self) -> U256 { - self.transaction.nonce - } - - fn sender(&self) -> Address { - self.transaction.sender().unwrap() - } -} - -struct TransactionSet { - by_priority: BTreeSet, - by_address: Table, - limit: usize, -} - -impl TransactionSet { - fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) { - self.by_priority.insert(order.clone()); - self.by_address.insert(sender, nonce, order); - } - - fn enforce_limit(&mut self, by_hash: &HashMap) { - let len = self.by_priority.len(); - if len <= self.limit { - return; - } - - let to_drop : Vec<&VerifiedTransaction> = { - self.by_priority - .iter() - .skip(self.limit) - .map(|order| by_hash.get(&order.hash).expect("Inconsistency in queue detected.")) - .collect() - }; - - for tx in to_drop { - self.drop(&tx.sender(), &tx.nonce()); - } - } - - fn drop(&mut self, sender: &Address, nonce: &U256) -> Option { - if let Some(tx_order) = self.by_address.remove(sender, nonce) { - self.by_priority.remove(&tx_order); - return Some(tx_order); - } - None - } - - fn clear(&mut self) { - self.by_priority.clear(); - self.by_address.clear(); - } -} - -#[derive(Debug)] -/// Current status of the queue -pub struct TransactionQueueStatus { - /// Number of pending transactions (ready to go to block) - pub pending: usize, - /// Number of future transactions (waiting for transactions with lower nonces first) - pub future: usize, -} - -/// TransactionQueue implementation -pub struct TransactionQueue { - /// Priority queue for transactions that can go to block - current: TransactionSet, - /// Priority queue for transactions that has been received but are not yet valid to go to block - future: TransactionSet, - /// All transactions managed by queue indexed by hash - by_hash: HashMap, - /// Last nonce of transaction in current (to quickly check next expected transaction) - last_nonces: HashMap, -} - -impl 
TransactionQueue { - /// Creates new instance of this Queue - pub fn new() -> Self { - Self::with_limits(1024, 1024) - } - - /// Create new instance of this Queue with specified limits - pub fn with_limits(current_limit: usize, future_limit: usize) -> Self { - let current = TransactionSet { - by_priority: BTreeSet::new(), - by_address: Table::new(), - limit: current_limit, - }; - let future = TransactionSet { - by_priority: BTreeSet::new(), - by_address: Table::new(), - limit: future_limit, - }; - - TransactionQueue { - current: current, - future: future, - by_hash: HashMap::new(), - last_nonces: HashMap::new(), - } - } - - /// Returns current status for this queue - pub fn status(&self) -> TransactionQueueStatus { - TransactionQueueStatus { - pending: self.current.by_priority.len(), - future: self.future.by_priority.len(), - } - } - - /// Adds all signed transactions to queue to be verified and imported - pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) - where T: Fn(&Address) -> U256 { - for tx in txs.into_iter() { - self.add(tx, &fetch_nonce); - } - } - - /// Add signed transaction to queue to be verified and imported - pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) - where T: Fn(&Address) -> U256 { - self.import_tx(VerifiedTransaction::new(tx), fetch_nonce); - } - - /// Removes all transactions identified by hashes given in slice - /// - /// If gap is introduced marks subsequent transactions as future - pub fn remove_all(&mut self, txs: &[H256], fetch_nonce: T) - where T: Fn(&Address) -> U256 { - for tx in txs { - self.remove(&tx, &fetch_nonce); - } - } - - /// Removes transaction identified by hashes from queue. - /// - /// If gap is introduced marks subsequent transactions as future - pub fn remove(&mut self, hash: &H256, fetch_nonce: &T) - where T: Fn(&Address) -> U256 { - let transaction = self.by_hash.remove(hash); - if transaction.is_none() { - // We don't know this transaction - return; - } - let transaction = transaction.unwrap(); - let sender = transaction.sender(); - let nonce = transaction.nonce(); - - println!("Removing tx: {:?}", transaction.transaction); - // Remove from future - self.future.drop(&sender, &nonce); - - // Remove from current - let order = self.current.drop(&sender, &nonce); - if order.is_none() { - return; - } - - // Let's remove transactions where tx.nonce < current_nonce - // and if there are any future transactions matching current_nonce+1 - move to current - let current_nonce = fetch_nonce(&sender); - // We will either move transaction to future or remove it completely - // so there will be no transactions from this sender in current - self.last_nonces.remove(&sender); - - let all_nonces_from_sender = match self.current.by_address.row(&sender) { - Some(row_map) => row_map.keys().cloned().collect::>(), - None => vec![], - }; - - for k in all_nonces_from_sender { - // Goes to future or is removed - let order = self.current.drop(&sender, &k).unwrap(); - if k >= current_nonce { - println!("Moving to future: {:?}", order); - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); - } else { - self.by_hash.remove(&order.hash); - } - } - self.future.enforce_limit(&self.by_hash); - - // And now lets check if there is some chain of transactions in future - // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce - U256::one(), current_nonce) { - self.last_nonces.insert(sender, new_current_top); - } - } - - /// Returns top transactions from the queue - pub fn 
top_transactions(&self, size: usize) -> Vec<SignedTransaction> { - self.current.by_priority - .iter() - .take(size) - .map(|t| self.by_hash.get(&t.hash).expect("Transaction Queue Inconsistency")) - .map(|t| t.transaction.clone()) - .collect() - } - - /// Removes all elements (in any state) from the queue - pub fn clear(&mut self) { - self.current.clear(); - self.future.clear(); - self.by_hash.clear(); - self.last_nonces.clear(); - } - - fn move_future_txs(&mut self, address: Address, current_nonce: U256, first_nonce: U256) -> Option<U256> { - println!("Moving from future for: {:?} base: {:?}", current_nonce, first_nonce); - let mut current_nonce = current_nonce + U256::one(); - { - let by_nonce = self.future.by_address.row_mut(&address); - if let None = by_nonce { - return None; - } - let mut by_nonce = by_nonce.unwrap(); - while let Some(order) = by_nonce.remove(&current_nonce) { - // remove also from priority and hash - self.future.by_priority.remove(&order); - // Put to current - println!("Moved: {:?}", order); - let order = order.update_height(current_nonce.clone(), first_nonce); - self.current.insert(address.clone(), current_nonce, order); - current_nonce = current_nonce + U256::one(); - } - } - self.future.by_address.clear_if_empty(&address); - // Returns last inserted nonce - Some(current_nonce - U256::one()) - } - - fn import_tx<T>(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) - where T: Fn(&Address) -> U256 { - let nonce = tx.nonce(); - let address = tx.sender(); - - let next_nonce = self.last_nonces - .get(&address) - .cloned() - .map_or_else(|| fetch_nonce(&address), |n| n + U256::one()); - - println!("Expected next: {:?}, got: {:?}", next_nonce, nonce); - // Check height - if nonce > next_nonce { - let order = TransactionOrder::for_transaction(&tx, next_nonce); - // Insert to by_hash - self.by_hash.insert(tx.hash(), tx); - // We have a gap - put to future - self.future.insert(address, nonce, order); - self.future.enforce_limit(&self.by_hash); - return; - } else if next_nonce > nonce { - // Dropping transaction - return; - } - - let base_nonce = fetch_nonce(&address); - let order = TransactionOrder::for_transaction(&tx, base_nonce); - // Insert to by_hash - self.by_hash.insert(tx.hash(), tx); - - // Insert to current - self.current.insert(address.clone(), nonce, order); - // But maybe there are some more items waiting in future? 
- let new_last_nonce = self.move_future_txs(address.clone(), nonce, base_nonce); - self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); - // Enforce limit - self.current.enforce_limit(&self.by_hash); - } -} - - -#[cfg(test)] -mod test { - extern crate rustc_serialize; - use self::rustc_serialize::hex::FromHex; - use std::collections::{HashMap, BTreeSet}; - use util::crypto::KeyPair; - use util::numbers::{U256, Uint}; - use util::hash::{Address}; - use util::table::*; - use ethcore::transaction::*; - use super::*; - use super::{TransactionSet, TransactionOrder, VerifiedTransaction}; - - fn new_unsigned_tx(nonce: U256) -> Transaction { - Transaction { - action: Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::one(), - nonce: nonce - } - } - - fn new_tx() -> SignedTransaction { - let keypair = KeyPair::create().unwrap(); - new_unsigned_tx(U256::from(123)).sign(&keypair.secret()) - } - - fn default_nonce(_address: &Address) -> U256 { - U256::from(123) - } - - fn new_txs(second_nonce: U256) -> (SignedTransaction, SignedTransaction) { - let keypair = KeyPair::create().unwrap(); - let secret = &keypair.secret(); - let nonce = U256::from(123); - let tx = new_unsigned_tx(nonce); - let tx2 = new_unsigned_tx(nonce + second_nonce); - - (tx.sign(secret), tx2.sign(secret)) - } - - #[test] - fn should_create_transaction_set() { - // given - let mut set = TransactionSet { - by_priority: BTreeSet::new(), - by_address: Table::new(), - limit: 1 - }; - let (tx1, tx2) = new_txs(U256::from(1)); - let tx1 = VerifiedTransaction::new(tx1); - let tx2 = VerifiedTransaction::new(tx2); - let by_hash = { - let mut x = HashMap::new(); - let tx1 = VerifiedTransaction::new(tx1.transaction.clone()); - let tx2 = VerifiedTransaction::new(tx2.transaction.clone()); - x.insert(tx1.hash(), tx1); - x.insert(tx2.hash(), tx2); - x - }; - // Insert both transactions - let order1 = TransactionOrder::for_transaction(&tx1, U256::zero()); - set.insert(tx1.sender(), tx1.nonce(), order1.clone()); - let order2 = TransactionOrder::for_transaction(&tx2, U256::zero()); - set.insert(tx2.sender(), tx2.nonce(), order2.clone()); - assert_eq!(set.by_priority.len(), 2); - assert_eq!(set.by_address.len(), 2); - - // when - set.enforce_limit(&by_hash); - - // then - assert_eq!(set.by_priority.len(), 1); - assert_eq!(set.by_address.len(), 1); - assert_eq!(set.by_priority.iter().next().unwrap().clone(), order1); - set.clear(); - assert_eq!(set.by_priority.len(), 0); - assert_eq!(set.by_address.len(), 0); - } - - - #[test] - fn should_import_tx() { - // given - let mut txq = TransactionQueue::new(); - let tx = new_tx(); - - // when - txq.add(tx, &default_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.pending, 1); - } - - #[test] - fn should_import_txs_from_same_sender() { - // given - let mut txq = TransactionQueue::new(); - - let (tx, tx2) = new_txs(U256::from(1)); - - // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); - - // then - let top = txq.top_transactions(5); - assert_eq!(top[0], tx); - assert_eq!(top[1], tx2); - assert_eq!(top.len(), 2); - } - - #[test] - fn should_put_transaction_to_futures_if_gap_detected() { - // given - let mut txq = TransactionQueue::new(); - - let (tx, tx2) = new_txs(U256::from(2)); - - // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.pending, 1); - 
assert_eq!(stats.future, 1); - let top = txq.top_transactions(5); - assert_eq!(top.len(), 1); - assert_eq!(top[0], tx); - } - - #[test] - fn should_move_transactions_if_gap_filled() { - // given - let mut txq = TransactionQueue::new(); - let kp = KeyPair::create().unwrap(); - let secret = kp.secret(); - let tx = new_unsigned_tx(U256::from(123)).sign(&secret); - let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret); - let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret); - - txq.add(tx, &default_nonce); - assert_eq!(txq.status().pending, 1); - txq.add(tx2, &default_nonce); - assert_eq!(txq.status().future, 1); - - // when - txq.add(tx1, &default_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.pending, 3); - assert_eq!(stats.future, 0); - } - - #[test] - fn should_remove_transaction() { - // given - let mut txq2 = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(3)); - txq2.add(tx.clone(), &default_nonce); - txq2.add(tx2.clone(), &default_nonce); - assert_eq!(txq2.status().pending, 1); - assert_eq!(txq2.status().future, 1); - - // when - txq2.remove(&tx.hash(), &default_nonce); - txq2.remove(&tx2.hash(), &default_nonce); - - - // then - let stats = txq2.status(); - assert_eq!(stats.pending, 0); - assert_eq!(stats.future, 0); - } - - #[test] - fn should_move_transactions_to_future_if_gap_introduced() { - // given - let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); - let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); - assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); - assert_eq!(txq.status().pending, 3); - - // when - txq.remove(&tx.hash(), &default_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.future, 1); - assert_eq!(stats.pending, 1); - } - - #[test] - fn should_clear_queue() { - // given - let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::one()); - - // add - txq.add(tx2.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); - let stats = txq.status(); - assert_eq!(stats.pending, 2); - - // when - txq.clear(); - - // then - let stats = txq.status(); - assert_eq!(stats.pending, 0); - } - - #[test] - fn should_drop_old_transactions_when_hitting_the_limit() { - // given - let mut txq = TransactionQueue::with_limits(1, 1); - let (tx, tx2) = new_txs(U256::one()); - txq.add(tx.clone(), &default_nonce); - assert_eq!(txq.status().pending, 1); - - // when - txq.add(tx2.clone(), &default_nonce); - - // then - let t = txq.top_transactions(2); - assert_eq!(txq.status().pending, 1); - assert_eq!(t.len(), 1); - assert_eq!(t[0], tx); - } - - #[test] - fn should_limit_future_transactions() { - let mut txq = TransactionQueue::with_limits(10, 1); - let (tx1, tx2) = new_txs(U256::from(4)); - let (tx3, tx4) = new_txs(U256::from(4)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx3.clone(), &default_nonce); - assert_eq!(txq.status().pending, 2); - - // when - txq.add(tx2.clone(), &default_nonce); - assert_eq!(txq.status().future, 1); - txq.add(tx4.clone(), &default_nonce); - - // then - assert_eq!(txq.status().future, 1); - } - - #[test] - fn should_drop_transactions_with_old_nonces() { - let mut txq = TransactionQueue::new(); - let tx = new_tx(); - let last_nonce = tx.nonce.clone() + U256::one(); - let fetch_last_nonce = |_a: &Address| last_nonce; - - // when - txq.add(tx, &fetch_last_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.pending, 0); - assert_eq!(stats.future, 0); - } - - #[test] 
- fn should_accept_same_transaction_twice() { - // given - let mut txq = TransactionQueue::new(); - let (tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); - assert_eq!(txq.status().pending, 2); - - // when - txq.remove(&tx1.hash(), &default_nonce); - assert_eq!(txq.status().pending, 0); - assert_eq!(txq.status().future, 1); - txq.add(tx1.clone(), &default_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.future, 0); - assert_eq!(stats.pending, 2); - } - - #[test] - fn should_not_move_to_future_if_state_nonce_is_higher() { - // given - let next_nonce = |a: &Address| default_nonce(a) + U256::one(); - let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); - let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); - assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); - assert_eq!(txq.status().pending, 3); - - // when - txq.remove(&tx.hash(), &next_nonce); - - // then - let stats = txq.status(); - assert_eq!(stats.future, 0); - assert_eq!(stats.pending, 2); - } - -} diff --git a/test.sh b/test.sh index dd71d120a..4957fd762 100755 --- a/test.sh +++ b/test.sh @@ -8,4 +8,5 @@ cargo test --features ethcore/json-tests $1 \ -p ethsync \ -p ethcore-rpc \ -p parity \ + -p ethminer \ -p bigint diff --git a/util/Cargo.toml b/util/Cargo.toml index 9c5cb3fe3..641036191 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -3,7 +3,7 @@ description = "Ethcore utility library" homepage = "http://ethcore.io" license = "GPL-3.0" name = "ethcore-util" -version = "0.9.99" +version = "1.1.0" authors = ["Ethcore "] build = "build.rs" @@ -27,7 +27,7 @@ crossbeam = "0.2" slab = "0.1" sha3 = { path = "sha3" } serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.50", optional = true } json-tests = { path = "json-tests" } rustc_version = "0.1.0" igd = "0.4.2" @@ -44,4 +44,3 @@ dev = ["clippy"] [build-dependencies] vergen = "*" -rustc_version = "0.1" diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 377391eeb..1bd2b994e 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -15,7 +15,6 @@ rustc-serialize = "0.3" arrayvec = "0.3" rand = "0.3.12" serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } heapsize = "0.3" [features] diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index 698b12f42..d185750c2 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -36,10 +36,11 @@ //! The functions here are designed to be fast. //! +#[cfg(all(asm_available, target_arch="x86_64"))] +use std::mem; use std::fmt; use std::cmp; -use std::mem; use std::str::{FromStr}; use std::convert::From; use std::hash::{Hash, Hasher}; @@ -785,14 +786,11 @@ macro_rules! 
construct_uint { fn visit_str(&mut self, value: &str) -> Result where E: serde::Error { // 0x + len - if value.len() != 2 + $n_words / 8 { + if value.len() > 2 + $n_words * 16 { return Err(serde::Error::custom("Invalid length.")); } - match $name::from_str(&value[2..]) { - Ok(val) => Ok(val), - Err(_) => { return Err(serde::Error::custom("Invalid length.")); } - } + $name::from_str(&value[2..]).map_err(|_| serde::Error::custom("Invalid hex value.")) } fn visit_string(&mut self, value: String) -> Result where E: serde::Error { @@ -1970,6 +1968,39 @@ mod tests { assert_eq!(U256([1, 0, 0, 0]), result); } + #[test] + fn u256_multi_muls() { + let (result, _) = U256([0, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, 0])); + assert_eq!(U256([0, 0, 0, 0]), result); + + let (result, _) = U256([1, 0, 0, 0]).overflowing_mul(U256([1, 0, 0, 0])); + assert_eq!(U256([1, 0, 0, 0]), result); + + let (result, _) = U256([5, 0, 0, 0]).overflowing_mul(U256([5, 0, 0, 0])); + assert_eq!(U256([25, 0, 0, 0]), result); + + let (result, _) = U256([0, 5, 0, 0]).overflowing_mul(U256([0, 5, 0, 0])); + assert_eq!(U256([0, 0, 25, 0]), result); + + let (result, _) = U256([0, 0, 0, 1]).overflowing_mul(U256([1, 0, 0, 0])); + assert_eq!(U256([0, 0, 0, 1]), result); + + let (result, _) = U256([0, 0, 0, 5]).overflowing_mul(U256([2, 0, 0, 0])); + assert_eq!(U256([0, 0, 0, 10]), result); + + let (result, _) = U256([0, 0, 1, 0]).overflowing_mul(U256([0, 5, 0, 0])); + assert_eq!(U256([0, 0, 0, 5]), result); + + let (result, _) = U256([0, 0, 8, 0]).overflowing_mul(U256([0, 0, 7, 0])); + assert_eq!(U256([0, 0, 0, 0]), result); + + let (result, _) = U256([2, 0, 0, 0]).overflowing_mul(U256([0, 5, 0, 0])); + assert_eq!(U256([0, 10, 0, 0]), result); + + let (result, _) = U256([1, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, ::std::u64::MAX])); + assert_eq!(U256([0, 0, 0, ::std::u64::MAX]), result); + } + #[test] fn u256_multi_muls_overflow() { let (_, overflow) = U256([1, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, 0])); @@ -2002,7 +2033,7 @@ mod tests { #[test] - #[cfg_attr(feature = "dev", allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn u256_multi_full_mul() { let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); diff --git a/util/build.rs b/util/build.rs index eed080e29..b0b64a380 100644 --- a/util/build.rs +++ b/util/build.rs @@ -1,3 +1,19 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
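The new `u256_multi_muls` vectors read naturally once the limb convention is explicit: `U256([a0, a1, a2, a3])` encodes a0 + a1*2^64 + a2*2^128 + a3*2^192 in little-endian 64-bit limbs, so schoolbook multiplication sends the product of limbs i and j to limb i + j, and anything with i + j >= 4 is the overflow probed separately by `u256_multi_muls_overflow`. A small sketch (not the crate's implementation, and deliberately carry-free since these vectors produce no carries):

```rust
// Schoolbook limb multiplication, truncated to four limbs and ignoring
// carries, which is enough to reproduce the test vectors above.
fn mul_limbs_no_carry(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
    let mut out = [0u64; 4];
    for i in 0..4 {
        for j in 0..4 {
            if i + j < 4 {
                out[i + j] = out[i + j].wrapping_add(a[i].wrapping_mul(b[j]));
            }
            // i + j >= 4 would overflow past 2^256 and is dropped here.
        }
    }
    out
}

fn main() {
    // (5 * 2^64)^2 = 25 * 2^128, i.e. limb 1 times limb 1 lands in limb 2.
    assert_eq!(mul_limbs_no_carry([0, 5, 0, 0], [0, 5, 0, 0]), [0, 0, 25, 0]);
    // 2 * (5 * 2^64) = 10 * 2^64.
    assert_eq!(mul_limbs_no_carry([2, 0, 0, 0], [0, 5, 0, 0]), [0, 10, 0, 0]);
    // (8 * 2^128) * (7 * 2^128) = 56 * 2^256: pure overflow, so all zeros.
    assert_eq!(mul_limbs_no_carry([0, 0, 8, 0], [0, 0, 7, 0]), [0, 0, 0, 0]);
}
```

The same convention explains the relaxed serde check above it: `$n_words` 64-bit words print as at most `$n_words * 16` hex digits, so for `U256` the longest valid literal is `"0x"` plus 64 digits, while shorter, non-zero-padded literals (which the old exact-length test wrongly rejected) now parse.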
+ extern crate vergen; use vergen::*; diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 5a4500ae6..0683ea4df 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -177,7 +177,7 @@ impl BytesConvertable for T where T: AsRef<[u8]> { #[test] fn bytes_convertable() { assert_eq!(vec![0x12u8, 0x34].bytes(), &[0x12u8, 0x34]); - assert_eq!([0u8; 0].bytes(), &[]); + assert!([0u8; 0].as_slice().is_empty()); } /// Simple trait to allow for raw population of a Sized object from a byte slice. diff --git a/util/src/error.rs b/util/src/error.rs index 68aa3e648..409cc0e5d 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -21,12 +21,13 @@ use network::NetworkError; use rlp::DecoderError; use io; use std::fmt; +use hash::H256; #[derive(Debug)] /// Error in database subsystem. pub enum BaseDataError { /// An entry was removed more times than inserted. - NegativelyReferencedHash, + NegativelyReferencedHash(H256), } #[derive(Debug)] diff --git a/util/src/hash.rs b/util/src/hash.rs index 73fa33b47..fce0720d1 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -257,7 +257,7 @@ macro_rules! impl_hash { return Err(serde::Error::custom("Invalid length.")); } - value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid valid hex.")) + value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid hex value.")) } fn visit_string(&mut self, value: String) -> Result where E: serde::Error { diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index 4d8cbaba1..e622c4b99 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -20,7 +20,7 @@ use bytes::*; use std::collections::HashMap; /// Trait modelling datastore keyed by a 32-byte Keccak hash. -pub trait HashDB { +pub trait HashDB : AsHashDB { /// Get the keys in the database together with number of underlying references. fn keys(&self) -> HashMap; @@ -111,3 +111,16 @@ pub trait HashDB { /// ``` fn remove(&mut self, key: &H256) { self.kill(key) } } + +/// Upcast trait. +pub trait AsHashDB { + /// Perform upcast to HashDB for anything that derives from HashDB. + fn as_hashdb(&self) -> &HashDB; + /// Perform mutable upcast to HashDB for anything that derives from HashDB. 
+ fn as_hashdb_mut(&mut self) -> &mut HashDB; +} + +impl<T: HashDB> AsHashDB for T { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 83fa71b8a..8a34ee80a 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -153,7 +153,7 @@ struct UserTimer { pub struct IoManager<Message> where Message: Send + Sync { timers: Arc>>, handlers: Vec<Arc<IoHandler<Message>>>, - _workers: Vec<Worker>, + workers: Vec<Worker>, worker_channel: chase_lev::Worker<Work<Message>>, work_ready: Arc, } @@ -180,7 +180,7 @@ impl<Message> IoManager<Message> where Message: Send + Sync + Clone + 'static { timers: Arc::new(RwLock::new(HashMap::new())), handlers: Vec::new(), worker_channel: worker, - _workers: workers, + workers: workers, work_ready: work_ready, }; try!(event_loop.run(&mut io)); @@ -230,7 +230,10 @@ impl<Message> Handler for IoManager<Message> where Message: Send + Clone + Sync fn notify(&mut self, event_loop: &mut EventLoop<IoManager<Message>>, msg: Self::Message) { match msg { - IoMessage::Shutdown => event_loop.shutdown(), + IoMessage::Shutdown => { + self.workers.clear(); + event_loop.shutdown(); + }, IoMessage::AddHandler { handler } => { let handler_id = { self.handlers.push(handler.clone()); diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs deleted file mode 100644 index d0d7c05ff..000000000 --- a/util/src/journaldb.rs +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see <http://www.gnu.org/licenses/>. - -//! Disk-backed HashDB implementation. - -use common::*; -use rlp::*; -use hashdb::*; -use memorydb::*; -use kvdb::{Database, DBTransaction, DatabaseConfig}; -#[cfg(test)] -use std::env; - -/// Implementation of the HashDB trait for a disk-backed database with a memory overlay -/// and latent-removal semantics. -/// -/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to -/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect -/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before -/// the removals actually take effect. 
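Before the deleted implementation scrolls past, the latent-removal contract in that doc comment deserves one concrete walkthrough. A usage sketch against the API visible in this file (`insert`, `remove` via `HashDB`, and `commit(now, id, end)`); the era numbers and block-hash names are illustrative only:

```rust
// Sketch only: JournalDB and H256 are the types from the module above.
// A removal journalled at era N is not applied to the backing store until
// era N turns ancient and its commit is confirmed canonical through the
// `end` argument of a later `commit`.
fn latent_removal_sketch(db: &mut JournalDB, b10: H256, b11: H256, b12: H256) {
    let key = db.insert(b"node data");             // staged in the memory overlay
    db.commit(10, &b10, None).unwrap();            // insertion journalled, era 10

    db.remove(&key);                               // latent: key still readable
    db.commit(11, &b11, Some((10, b10))).unwrap(); // era 10 ages out, b10 canonical

    // The removal only takes effect once *its* era ages out in turn:
    db.commit(12, &b12, Some((11, b11))).unwrap();
    // Era 11 is now ancient with b11 canonical, so `key` may leave the backing
    // database, unless another journalled insert still holds a reference.
}
```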
-pub struct JournalDB { - overlay: MemoryDB, - backing: Arc, - counters: Option>>>, -} - -impl Clone for JournalDB { - fn clone(&self) -> JournalDB { - JournalDB { - overlay: MemoryDB::new(), - backing: self.backing.clone(), - counters: self.counters.clone(), - } - } -} - -// all keys must be at least 12 bytes -const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; - -const DB_VERSION : u32 = 3; -const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; - -const PADDING : [u8; 10] = [ 0u8; 10 ]; - -impl JournalDB { - /// Create a new instance from file - pub fn new(path: &str) -> JournalDB { - Self::from_prefs(path, true) - } - - /// Create a new instance from file - pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB { - let opts = DatabaseConfig { - prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix - }; - let backing = Database::open(&opts, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - let with_journal; - if !backing.is_empty() { - match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => { with_journal = true; }, - Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; }, - v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) - } - } else { - backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database"); - with_journal = prefer_journal; - } - - let counters = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) - } else { - None - }; - JournalDB { - overlay: MemoryDB::new(), - backing: Arc::new(backing), - counters: counters, - } - } - - /// Create a new instance with an anonymous temporary database. - #[cfg(test)] - pub fn new_temp() -> JournalDB { - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap()) - } - - /// Check if this database has any commits - pub fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() - } - - /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) - } else { - self.commit_without_counters() - } - } - - /// Drain the overlay and place it into a batch for the DB. - fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut inserts = 0usize; - let mut deletes = 0usize; - for i in overlay.drain().into_iter() { - let (key, (value, rc)) = i; - if rc > 0 { - assert!(rc == 1); - batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); - inserts += 1; - } - if rc < 0 { - assert!(rc == -1); - deletes += 1; - } - } - trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); - inserts + deletes - } - - /// Just commit the overlay into the backing DB. 
- fn commit_without_counters(&mut self) -> Result { - let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); - try!(self.backing.write(batch)); - Ok(ret as u32) - } - - fn morph_key(key: &H256, index: u8) -> Bytes { - let mut ret = key.bytes().to_owned(); - ret.push(index); - ret - } - - // The next three are valid only as long as there is an insert operation of `key` in the journal. - fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } - fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } - fn is_already_in(backing: &Database, key: &H256) -> bool { - backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() - } - - fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { - for &(ref h, ref d) in inserts { - if let Some(c) = counters.get_mut(h) { - // already counting. increment. - *c += 1; - continue; - } - - // this is the first entry for this node in the journal. - if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { - // already in the backing DB. start counting, and remember it was already in. - Self::set_already_in(batch, &h); - counters.insert(h.clone(), 1); - continue; - } - - // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. - //Self::reset_already_in(&h); - assert!(!Self::is_already_in(backing, &h)); - batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?"); - } - } - - fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { - trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); - for h in inserts { - if let Some(c) = counters.get_mut(h) { - // already counting. increment. - *c += 1; - continue; - } - - // this is the first entry for this node in the journal. - // it is initialised to 1 if it was already in. - if Self::is_already_in(backing, h) { - trace!("replace_keys: Key {} was already in!", h); - counters.insert(h.clone(), 1); - } - } - trace!("replay_keys: (end) counters={:?}", counters); - } - - fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &DBTransaction) { - for h in deletes.into_iter() { - let mut n: Option = None; - if let Some(c) = counters.get_mut(&h) { - if *c > 1 { - *c -= 1; - continue; - } else { - n = Some(*c); - } - } - match &n { - &Some(i) if i == 1 => { - counters.remove(&h); - Self::reset_already_in(batch, &h); - } - &None => { - // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. - //assert!(!Self::is_already_in(db, &h)); - batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); - } - _ => panic!("Invalid value in counters: {:?}", n), - } - } - } - - /// Commit all recent insert operations and historical removals from the old era - /// to the backing database. - fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: - // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, n] => [ ... ] - - // TODO: store reclaim_period. 
- - // When we make a new commit, we make a journal of all blocks in the recent history and record - // all keys that were inserted and deleted. The journal is ordered by era; multiple commits can - // share the same era. This forms a data structure similar to a queue but whose items are tuples. - // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history - // into ancient history) then only one commit from the tuple is considered canonical. This commit - // is kept in the main backing database, whereas any others from the same era are reverted. - // - // It is possible that a key, properly available in the backing database be deleted and re-inserted - // in the recent history queue, yet have both operations in commits that are eventually non-canonical. - // To avoid the original, and still required, key from being deleted, we maintain a reference count - // which includes an original key, if any. - // - // The semantics of the `counter` are: - // insert key k: - // counter already contains k: count += 1 - // counter doesn't contain k: - // backing db contains k: count = 1 - // backing db doesn't contain k: insert into backing db, count = 0 - // delete key k: - // counter contains k (count is asserted to be non-zero): - // count > 1: counter -= 1 - // count == 1: remove counter - // count == 0: remove key from backing db - // counter doesn't contain k: remove key from backing db - // - // Practically, this means that for each commit block turning from recent to ancient we do the - // following: - // is_canonical: - // inserts: Ignored (left alone in the backing database). - // deletes: Enacted; however, recent history queue is checked for ongoing references. This is - // reduced as a preference to deletion from the backing database. - // !is_canonical: - // inserts: Reverted; however, recent history queue is checked for ongoing references. This is - // reduced as a preference to deletion from the backing database. - // deletes: Ignored (they were never inserted). - // - - // record new commit's details. - trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); - let batch = DBTransaction::new(); - { - let mut index = 0usize; - let mut last; - - while try!(self.backing.get({ - let mut r = RlpStream::new_list(3); - r.append(&now); - r.append(&index); - r.append(&&PADDING[..]); - last = r.drain(); - &last - })).is_some() { - index += 1; - } - - let drained = self.overlay.drain(); - let removes: Vec = drained - .iter() - .filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned() - .collect(); - let inserts: Vec<(H256, Bytes)> = drained - .into_iter() - .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) - .collect(); - - let mut r = RlpStream::new_list(3); - r.append(id); - - // Process the new inserts. - // We use the inserts for three things. 
For each: - // - we place into the backing DB or increment the counter if already in; - // - we note in the backing db that it was already in; - // - we write the key into our journal for this block; - - r.begin_list(inserts.len()); - inserts.iter().foreach(|&(k, _)| {r.append(&k);}); - r.append(&removes); - Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); - try!(batch.put(&last, r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); - } - - // apply old commits' details - if let Some((end_era, canon_id)) = end { - let mut index = 0usize; - let mut last; - while let Some(rlp_data) = try!(self.backing.get({ - let mut r = RlpStream::new_list(3); - r.append(&end_era); - r.append(&index); - r.append(&&PADDING[..]); - last = r.drain(); - &last - })) { - let rlp = Rlp::new(&rlp_data); - let inserts: Vec = rlp.val_at(1); - let deletes: Vec = rlp.val_at(2); - // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical - Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch); - try!(batch.delete(&last)); - index += 1; - } - trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); - } - - try!(self.backing.write(batch)); -// trace!("JournalDB::commit() deleted {} nodes", deletes); - Ok(0) - } - - fn payload(&self, key: &H256) -> Option { - self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) - } - - fn read_counters(db: &Database) -> HashMap { - let mut counters = HashMap::new(); - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); - loop { - let mut index = 0usize; - while let Some(rlp_data) = db.get({ - let mut r = RlpStream::new_list(3); - r.append(&era); - r.append(&index); - r.append(&&PADDING[..]); - &r.drain() - }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); - let rlp = Rlp::new(&rlp_data); - let inserts: Vec = rlp.val_at(1); - Self::replay_keys(&inserts, db, &mut counters); - index += 1; - }; - if index == 0 || era == 0 { - break; - } - era -= 1; - } - } - trace!("Recovered {} counters", counters.len()); - counters - } - - /// Returns heap memory size used - pub fn mem_used(&self) -> usize { - self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 } - } - } - -impl HashDB for JournalDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = HashMap::new(); - for (key, _) in self.backing.iter() { - let h = H256::from_slice(key.deref()); - ret.insert(h, 1); - } - - for (key, refs) in self.overlay.keys().into_iter() { - let refs = *ret.get(&key).unwrap_or(&0) + refs; - ret.insert(key, refs); - } - ret - } - - fn lookup(&self, key: &H256) -> Option<&[u8]> { - let k = self.overlay.raw(key); - match k { - Some(&(ref d, rc)) if rc > 0 => Some(d), - _ => { - if let Some(x) = self.payload(key) { - Some(&self.overlay.denote(key, x).0) - } - else { - None - } - } - } - } - - fn exists(&self, key: &H256) -> bool { - self.lookup(key).is_some() - } - - fn insert(&mut self, value: &[u8]) -> H256 { - self.overlay.insert(value) - } - fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); - } - fn kill(&mut self, key: &H256) { - self.overlay.kill(key); - } -} - -#[cfg(test)] -mod tests { - use common::*; - use super::*; - use hashdb::*; - - #[test] - fn insert_same_in_fork() { - // history is 1 - 
let mut jdb = JournalDB::new_temp(); - - let x = jdb.insert(b"X"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); - jdb.commit(2, &b"2".sha3(), None).unwrap(); - jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); - jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); - - jdb.remove(&x); - jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); - let x = jdb.insert(b"X"); - jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); - - jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); - jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); - - assert!(jdb.exists(&x)); - } - - #[test] - fn long_history() { - // history is 3 - let mut jdb = JournalDB::new_temp(); - let h = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&h)); - jdb.remove(&h); - jdb.commit(1, &b"1".sha3(), None).unwrap(); - assert!(jdb.exists(&h)); - jdb.commit(2, &b"2".sha3(), None).unwrap(); - assert!(jdb.exists(&h)); - jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); - assert!(jdb.exists(&h)); - jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(!jdb.exists(&h)); - } - - #[test] - fn complex() { - // history is 1 - let mut jdb = JournalDB::new_temp(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - - jdb.remove(&foo); - jdb.remove(&bar); - let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - assert!(jdb.exists(&baz)); - - let foo = jdb.insert(b"foo"); - jdb.remove(&baz); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&bar)); - assert!(jdb.exists(&baz)); - - jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&bar)); - assert!(!jdb.exists(&baz)); - - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - assert!(!jdb.exists(&bar)); - assert!(!jdb.exists(&baz)); - } - - #[test] - fn fork() { - // history is 1 - let mut jdb = JournalDB::new_temp(); - - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); - - jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); - - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - assert!(jdb.exists(&baz)); - - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&baz)); - assert!(!jdb.exists(&bar)); - } - - #[test] - fn overwrite() { - // history is 1 - let mut jdb = JournalDB::new_temp(); - - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - assert!(jdb.exists(&foo)); - - jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - jdb.insert(b"foo"); - assert!(jdb.exists(&foo)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - } - - #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = JournalDB::new_temp(); - 
jdb.commit(0, &b"0".sha3(), None).unwrap(); - - let foo = jdb.insert(b"foo"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); - - jdb.insert(b"foo"); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - - jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - } - - - #[test] - fn reopen() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - let bar = H256::random(); - - let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.emplace(bar.clone(), b"bar".to_vec()); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - foo - }; - - { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - } - - { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - assert!(jdb.exists(&foo)); - assert!(jdb.exists(&bar)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - } - } - - #[test] - fn reopen_remove() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - let bar = H256::random(); - - let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - - // foo is ancient history. - - jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; - - { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - } - } - #[test] - fn reopen_fork() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - let (foo, bar, baz) = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - let bar = jdb.insert(b"bar"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.remove(&foo); - let baz = jdb.insert(b"baz"); - jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); - - jdb.remove(&bar); - jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); - (foo, bar, baz) - }; - - { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); - assert!(!jdb.exists(&baz)); - assert!(!jdb.exists(&bar)); - } - } -} diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs new file mode 100644 index 000000000..83a80b7c2 --- /dev/null +++ b/util/src/journaldb/archivedb.rs @@ -0,0 +1,418 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Disk-backed HashDB implementation.
+
+use common::*;
+use rlp::*;
+use hashdb::*;
+use memorydb::*;
+use super::traits::JournalDB;
+use kvdb::{Database, DBTransaction, DatabaseConfig};
+#[cfg(test)]
+use std::env;
+
+/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
+/// and latent-removal semantics.
+///
+/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
+/// the removals actually take effect.
+pub struct ArchiveDB {
+    overlay: MemoryDB,
+    backing: Arc<Database>,
+    latest_era: Option<u64>,
+}
+
+// all keys must be at least 12 bytes
+const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
+const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
+const DB_VERSION : u32 = 259;
+
+impl ArchiveDB {
+    /// Create a new instance from file
+    pub fn new(path: &str) -> ArchiveDB {
+        let opts = DatabaseConfig {
+            prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix
+        };
+        let backing = Database::open(&opts, path).unwrap_or_else(|e| {
+            panic!("Error opening state db: {}", e);
+        });
+        if !backing.is_empty() {
+            match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
+                Ok(Some(DB_VERSION)) => {},
+                v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
+            }
+        } else {
+            backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
+        }
+
+        let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
+        ArchiveDB {
+            overlay: MemoryDB::new(),
+            backing: Arc::new(backing),
+            latest_era: latest_era,
+        }
+    }
+
+    /// Create a new instance with an anonymous temporary database.
+    #[cfg(test)]
+    fn new_temp() -> ArchiveDB {
+        let mut dir = env::temp_dir();
+        dir.push(H32::random().hex());
+        Self::new(dir.to_str().unwrap())
+    }
+
+    fn payload(&self, key: &H256) -> Option<Bytes> {
+        self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
Some issue with your hard disk?").map(|v| v.to_vec()) + } +} + +impl HashDB for ArchiveDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for ArchiveDB { + fn spawn(&self) -> Box { + Box::new(ArchiveDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + latest_era: self.latest_era, + }) + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + } + + fn is_empty(&self) -> bool { + self.latest_era.is_none() + } + + fn commit(&mut self, now: u64, _: &H256, _: Option<(u64, H256)>) -> Result { + let batch = DBTransaction::new(); + let mut inserts = 0usize; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc > 0 { + assert!(rc == 1); + batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); + inserts += 1; + } + if rc < 0 { + assert!(rc == -1); + deletes += 1; + } + } + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } + try!(self.backing.write(batch)); + Ok((inserts + deletes) as u32) + } + + fn state(&self, id: &H256) -> Option { + self.backing.get_by_prefix(&id.bytes()[0..12]).and_then(|b| Some(b.to_vec())) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + use journaldb::traits::JournalDB; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = ArchiveDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let 
foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + } + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + } + } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + + // foo is ancient history. 
+ + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + foo + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + } + } + + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, _, _) = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + } + + #[test] + fn returns_state() { + let temp = ::devtools::RandomTempPath::new(); + + let key = { + let mut jdb = ArchiveDB::new(temp.as_str()); + let key = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + key + }; + + { + let jdb = ArchiveDB::new(temp.as_str()); + let state = jdb.state(&key); + assert!(state.is_some()); + } + } +} diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs new file mode 100644 index 000000000..7cb00b993 --- /dev/null +++ b/util/src/journaldb/earlymergedb.rs @@ -0,0 +1,1062 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use rlp::*; +use hashdb::*; +use memorydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +#[derive(Clone, PartialEq, Eq)] +struct RefInfo { + queue_refs: usize, + in_archive: bool, +} + +impl HeapSizeOf for RefInfo { + fn heap_size_of_children(&self) -> usize { 0 } +} + +impl fmt::Display for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +impl fmt::Debug for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +#[derive(Clone, PartialEq, Eq)] +enum RemoveFrom { + Queue, + Archive, +} + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. 
Rather some age (based on a linear but arbitrary metric) must pass before
+/// the removals actually take effect.
+pub struct EarlyMergeDB {
+    overlay: MemoryDB,
+    backing: Arc<Database>,
+    refs: Option<Arc<RwLock<HashMap<H256, RefInfo>>>>,
+    latest_era: Option<u64>,
+}
+
+// all keys must be at least 12 bytes
+const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
+const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
+const DB_VERSION : u32 = 3;
+const PADDING : [u8; 10] = [ 0u8; 10 ];
+
+impl EarlyMergeDB {
+    /// Create a new instance from file
+    pub fn new(path: &str) -> EarlyMergeDB {
+        let opts = DatabaseConfig {
+            prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix
+        };
+        let backing = Database::open(&opts, path).unwrap_or_else(|e| {
+            panic!("Error opening state db: {}", e);
+        });
+        if !backing.is_empty() {
+            match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
+                Ok(Some(DB_VERSION)) => {},
+                v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
+            }
+        } else {
+            backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
+        }
+
+        let (latest_era, refs) = EarlyMergeDB::read_refs(&backing);
+        let refs = Some(Arc::new(RwLock::new(refs)));
+        EarlyMergeDB {
+            overlay: MemoryDB::new(),
+            backing: Arc::new(backing),
+            refs: refs,
+            latest_era: latest_era,
+        }
+    }
+
+    /// Create a new instance with an anonymous temporary database.
+    #[cfg(test)]
+    fn new_temp() -> EarlyMergeDB {
+        let mut dir = env::temp_dir();
+        dir.push(H32::random().hex());
+        Self::new(dir.to_str().unwrap())
+    }
+
+    fn morph_key(key: &H256, index: u8) -> Bytes {
+        let mut ret = key.bytes().to_owned();
+        ret.push(index);
+        ret
+    }
+
+    // The next three are valid only as long as there is an insert operation of `key` in the journal.
+    fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
+    fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
+    fn is_already_in(backing: &Database, key: &H256) -> bool {
+        backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
+    }
+
+    fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap<H256, RefInfo>, batch: &DBTransaction, trace: bool) {
+        for &(ref h, ref d) in inserts {
+            if let Some(c) = refs.get_mut(h) {
+                // already counting. increment.
+                c.queue_refs += 1;
+                if trace {
+                    trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, c.queue_refs);
+                }
+                continue;
+            }
+
+            // this is the first entry for this node in the journal.
+            if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() {
+                // already in the backing DB. start counting, and remember it was already in.
+                Self::set_already_in(batch, &h);
+                refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true});
+                if trace {
+                    trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h);
+                }
+                continue;
+            }
+
+            // Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
+            //Self::reset_already_in(&h);
+            assert!(!Self::is_already_in(backing, &h));
+            batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?");
Some issue with your hard disk?"); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); + } + } + } + + fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap) { + trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); + for h in inserts { + if let Some(c) = refs.get_mut(h) { + // already counting. increment. + c.queue_refs += 1; + continue; + } + + // this is the first entry for this node in the journal. + // it is initialised to 1 if it was already in. + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, h)}); + } + trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); + } + + fn kill_keys(deletes: &[H256], refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + // with a kill on {queue_refs: 1, in_archive: true}, we have two options: + // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) + // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) + // (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.) + // both are valid, but we switch between them depending on context. + // All inserts in queue (i.e. those which may yet be reverted) have an entry in refs. + for h in deletes.iter() { + let mut n: Option = None; + if let Some(c) = refs.get_mut(h) { + if c.in_archive && from == RemoveFrom::Archive { + c.in_archive = false; + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Reducing to queue only and recording", h); + } + continue; + } else if c.queue_refs > 1 { + c.queue_refs -= 1; + if trace { + trace!(target: "jdb.fine", " kill({}): In queue > 1 refs: Decrementing ref count to {}", h, c.queue_refs); + } + continue; + } else { + n = Some(c.clone()); + } + } + match n { + Some(RefInfo{queue_refs: 1, in_archive: true}) => { + refs.remove(h); + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); + } + } + Some(RefInfo{queue_refs: 1, in_archive: false}) => { + refs.remove(h); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); + } + } + None => { + // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. + //assert!(!Self::is_already_in(db, &h)); + batch.delete(&h.bytes()).expect("Low-level database error. 
Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); + } + } + _ => panic!("Invalid value in refs: {:?}", n), + } + } + } + + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let (latest_era, reconstructed) = Self::read_refs(&self.backing); + let refs = self.refs.as_ref().unwrap().write().unwrap(); + if *refs != reconstructed || latest_era != self.latest_era { + let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); + let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); + warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); + false + } else { + true + } + } + + fn payload(&self, key: &H256) -> Option { + self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_refs(db: &Database) -> (Option, HashMap) { + let mut refs = HashMap::new(); + let mut latest_era = None; + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::(&val); + latest_era = Some(era); + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + let rlp = Rlp::new(&rlp_data); + let inserts: Vec = rlp.val_at(1); + Self::replay_keys(&inserts, db, &mut refs); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + (latest_era, refs) + } + } + +impl HashDB for EarlyMergeDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for EarlyMergeDB { + fn spawn(&self) -> Box { + Box::new(EarlyMergeDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + refs: self.refs.clone(), + latest_era: self.latest_era.clone(), + }) + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.refs { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + + + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store reclaim_period. 
+
+        // When we make a new commit, we make a journal of all blocks in the recent history and record
+        // all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
+        // share the same era. This forms a data structure similar to a queue but whose items are tuples.
+        // By the time it comes to remove a tuple from the queue (i.e. when the era passes from recent
+        // history into ancient history), only one commit from the tuple is considered canonical. This
+        // commit is kept in the main backing database, whereas any others from the same era are reverted.
+        //
+        // It is possible that a key, properly available in the backing database, be deleted and
+        // re-inserted in the recent history queue, yet have both operations in commits that are
+        // eventually non-canonical. To prevent the original, and still required, key from being
+        // deleted, we maintain a reference count which includes an original key, if any.
+        //
+        // The semantics of the `counter` are:
+        // insert key k:
+        //   counter already contains k: count += 1
+        //   counter doesn't contain k:
+        //     backing db contains k: count = 1
+        //     backing db doesn't contain k: insert into backing db, count = 0
+        // delete key k:
+        //   counter contains k (count is asserted to be non-zero):
+        //     count > 1: counter -= 1
+        //     count == 1: remove counter
+        //     count == 0: remove key from backing db
+        //   counter doesn't contain k: remove key from backing db
+        //
+        // Practically, this means that for each commit block turning from recent to ancient we do the
+        // following:
+        // is_canonical:
+        //   inserts: Ignored (left alone in the backing database).
+        //   deletes: Enacted; however, recent history queue is checked for ongoing references. This is
+        //            reduced as a preference to deletion from the backing database.
+        // !is_canonical:
+        //   inserts: Reverted; however, recent history queue is checked for ongoing references. This is
+        //            reduced as a preference to deletion from the backing database.
+        //   deletes: Ignored (they were never inserted).
+        //
+
+        // record new commit's details.
+        let mut refs = self.refs.as_ref().unwrap().write().unwrap();
+        let batch = DBTransaction::new();
+        let trace = false;
+        {
+            let mut index = 0usize;
+            let mut last;
+
+            while try!(self.backing.get({
+                let mut r = RlpStream::new_list(3);
+                r.append(&now);
+                r.append(&index);
+                r.append(&&PADDING[..]);
+                last = r.drain();
+                &last
+            })).is_some() {
+                index += 1;
+            }
+
+            let drained = self.overlay.drain();
+
+            if trace {
+                trace!(target: "jdb", "commit: #{} ({}), end era: {:?}", now, id, end);
+            }
+
+            let removes: Vec<H256> = drained
+                .iter()
+                .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None})
+                .collect();
+            let inserts: Vec<(H256, Bytes)> = drained
+                .into_iter()
+                .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None })
+                .collect();
+
+
+            // TODO: check all removes are in the db.
+
+            let mut r = RlpStream::new_list(3);
+            r.append(id);
+
+            // Process the new inserts.
+            // We use the inserts for three things. For each:
+            // - we place into the backing DB or increment the counter if already in;
+            // - we note in the backing db that it was already in;
+            // - we write the key into our journal for this block;
+
+            r.begin_list(inserts.len());
+            inserts.iter().foreach(|&(k, _)| {r.append(&k);});
+            r.append(&removes);
+            Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace);
+            if trace {
+                let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<_>>();
+                trace!(target: "jdb.ops", "  Inserts: {:?}", ins);
+                trace!(target: "jdb.ops", "  Deletes: {:?}", removes);
+            }
+            try!(batch.put(&last, r.as_raw()));
+            if self.latest_era.map_or(true, |e| now > e) {
+                try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
+                self.latest_era = Some(now);
+            }
+        }
+
+        // apply old commits' details
+        if let Some((end_era, canon_id)) = end {
+            let mut index = 0usize;
+            let mut last;
+            while let Some(rlp_data) = try!(self.backing.get({
+                let mut r = RlpStream::new_list(3);
+                r.append(&end_era);
+                r.append(&index);
+                r.append(&&PADDING[..]);
+                last = r.drain();
+                &last
+            })) {
+                let rlp = Rlp::new(&rlp_data);
+                let inserts: Vec<H256> = rlp.val_at(1);
+
+                if canon_id == rlp.val_at(0) {
+                    // Collect keys to be removed. Canon block - remove the (enacted) deletes.
+                    let deletes: Vec<H256> = rlp.val_at(2);
+                    if trace {
+                        trace!(target: "jdb.ops", "  Expunging: {:?}", deletes);
+                    }
+                    Self::kill_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace);
+
+                    if trace {
+                        trace!(target: "jdb.ops", "  Finalising: {:?}", inserts);
+                    }
+                    for k in &inserts {
+                        match refs.get(k).cloned() {
+                            None => {
+                                // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert
+                                // already expunged from the queue (which is allowed since the key is in the archive).
+                                // leave well alone.
+                            }
+                            Some( RefInfo{queue_refs: 1, in_archive: false} ) => {
+                                // just delete the refs entry.
+                                refs.remove(k);
+                            }
+                            Some( RefInfo{queue_refs: x, in_archive: false} ) => {
+                                // must set already-in: the insert is now finalised into the archive.
+                                Self::set_already_in(&batch, k);
+                                refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true });
+                            }
+                            Some( RefInfo{in_archive: true, ..} ) => {
+                                // Invalid! Reinserted the same key twice.
+                                warn!("Key {} inserted twice into same fork.", k);
+                            }
+                        }
+                    }
+                } else {
+                    // Collect keys to be removed. Non-canon block - remove the (reverted) inserts.
+                    if trace {
+                        trace!(target: "jdb.ops", "  Reverting: {:?}", inserts);
+                    }
+                    Self::kill_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace);
+                }
+
+                try!(batch.delete(&last));
+                index += 1;
+            }
+            if trace {
+                trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id);
+            }
+        }
+
+        try!(self.backing.write(batch));
+
+        // Comment out for now. TODO: automatically enable in tests.
+ + if trace { + trace!(target: "jdb", "OK: {:?}", refs.clone()); + } + + Ok(0) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use super::super::traits::JournalDB; + use hashdb::*; + use log::init_log; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&x)); + } + + #[test] + fn insert_older_era() { + let mut jdb = EarlyMergeDB::new_temp(); + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(0, &b"0b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = EarlyMergeDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + 
assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = EarlyMergeDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + 
dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + foo + }; + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + (foo, bar, baz) + }; + + { + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } +} diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs new file mode 100644 index 000000000..e73c12969 --- /dev/null +++ b/util/src/journaldb/mod.rs @@ -0,0 +1,81 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! JournalDB interface and implementation. + +use common::*; + +/// Export the journaldb module. +pub mod traits; +mod archivedb; +mod earlymergedb; +mod overlayrecentdb; +mod refcounteddb; + +/// Export the JournalDB trait. +pub use self::traits::JournalDB; + +/// A journal database algorithm. +#[derive(Debug)] +pub enum Algorithm { + /// Keep all keys forever. + Archive, + + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into backing database, journal retains knowledge of whether backing DB key is + /// ancient or recent. Non-canon inserts get explicitly reverted and removed from backing DB. 
+    EarlyMerge,
+
+    /// Ancient and recent history maintained separately; recent history lasts for particular
+    /// number of blocks.
+    ///
+    /// Inserts go into memory overlay, which is tried for key fetches. Memory overlay gets
+    /// flushed into the backing database only at the end of recent history.
+    OverlayRecent,
+
+    /// Ancient and recent history maintained separately; recent history lasts for particular
+    /// number of blocks.
+    ///
+    /// References are counted in disk-backed DB.
+    RefCounted,
+}
+
+impl Default for Algorithm {
+    fn default() -> Algorithm { Algorithm::Archive }
+}
+
+impl fmt::Display for Algorithm {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", match self {
+            &Algorithm::Archive => "archive",
+            &Algorithm::EarlyMerge => "earlymerge",
+            &Algorithm::OverlayRecent => "overlayrecent",
+            &Algorithm::RefCounted => "refcounted",
+        })
+    }
+}
+
+/// Create a new JournalDB trait object.
+pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
+    match algorithm {
+        Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
+        Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)),
+        Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)),
+        Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path)),
+    }
+}
diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs
new file mode 100644
index 000000000..efbd26c3b
--- /dev/null
+++ b/util/src/journaldb/overlayrecentdb.rs
@@ -0,0 +1,893 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! JournalDB over in-memory overlay
+
+use common::*;
+use rlp::*;
+use hashdb::*;
+use memorydb::*;
+use kvdb::{Database, DBTransaction, DatabaseConfig};
+#[cfg(test)]
+use std::env;
+use super::JournalDB;
+
+/// Implementation of the JournalDB trait for a disk-backed database with a memory overlay
+/// and, possibly, latent-removal semantics.
+///
+/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
+/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
+/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
+/// the removals actually take effect.
+///
+/// There are two memory overlays:
+/// - Transaction overlay contains current transaction data. It is merged with the history
+/// overlay on each `commit()`
+/// - History overlay contains all data inserted during the history period. When the node
+/// in the overlay becomes ancient it is written to disk on `commit()`
+///
+/// There is also a journal, maintained both in memory and on disk, which lists insertions
+/// and removals for each commit during the history period. This is used to track
+/// data nodes that go out of history scope and must be written to disk.
+///
+/// Commit workflow:
Create a new journal record from the transaction overlay. +/// 2. Insert each node from the transaction overlay into the history overlay, increasing its reference +/// count if it is already there. Note that the reference counting is managed by `MemoryDB` +/// 3. Clear the transaction overlay. +/// 4. For a canonical journal record that becomes ancient, insert its insertions into the disk DB +/// 5. For each journal record that goes out of the history scope (becomes ancient) remove its +/// insertions from the history overlay, decreasing the reference counter and removing the entry +/// if it reaches zero. +/// 6. For a canonical journal record that becomes ancient, delete its removals from the disk only if +/// the removed key is not present in the history overlay. +/// 7. Delete the ancient record from memory and disk. 
+pub struct OverlayRecentDB { + transaction_overlay: MemoryDB, + backing: Arc<Database>, + journal_overlay: Arc<RwLock<JournalOverlay>>, +} + +#[derive(PartialEq)] +struct JournalOverlay { + backing_overlay: MemoryDB, + journal: HashMap<u64, Vec<JournalEntry>>, + latest_era: Option<u64>, +} + +#[derive(PartialEq)] +struct JournalEntry { + id: H256, + insertions: Vec<H256>, + deletions: Vec<H256>, +} + +impl HeapSizeOf for JournalEntry { + fn heap_size_of_children(&self) -> usize { + self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() + } +} + +impl Clone for OverlayRecentDB { + fn clone(&self) -> OverlayRecentDB { + OverlayRecentDB { + transaction_overlay: MemoryDB::new(), + backing: self.backing.clone(), + journal_overlay: self.journal_overlay.clone(), + } + } +} + +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 0x200 + 3; +const PADDING : [u8; 10] = [ 0u8; 10 ]; + +impl OverlayRecentDB { + /// Create a new instance from file + pub fn new(path: &str) -> OverlayRecentDB { + Self::from_prefs(path) + } + + /// Create a new instance from file + pub fn from_prefs(path: &str) -> OverlayRecentDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { + Ok(Some(DB_VERSION)) => {} + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing))); + OverlayRecentDB { + transaction_overlay: MemoryDB::new(), + backing: Arc::new(backing), + journal_overlay: journal_overlay, + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + pub fn new_temp() -> OverlayRecentDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } + + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&self.backing); + let journal_overlay = self.journal_overlay.read().unwrap(); + *journal_overlay == reconstructed + } + + fn payload(&self, key: &H256) -> Option<Bytes> { + self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_overlay(db: &Database) -> JournalOverlay { + let mut journal = HashMap::new(); + let mut overlay = MemoryDB::new(); + let mut count = 0; + let mut latest_era = None; + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::(&val); + latest_era = Some(era); + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + trace!("read_overlay: era={}, index={}", era, index); + let rlp = Rlp::new(&rlp_data); + let id: H256 = rlp.val_at(0); + let insertions = rlp.at(1); + let deletions: Vec = rlp.val_at(2); + let mut inserted_keys = Vec::new(); + for r in insertions.iter() { + let k: H256 = r.val_at(0); + let v: Bytes = r.val_at(1); + overlay.emplace(k.clone(), v); + inserted_keys.push(k); + count += 1; + } + journal.entry(era).or_insert_with(Vec::new).push(JournalEntry { + id: id, + insertions: inserted_keys, + deletions: deletions, + }); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); + JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } + } +} + +impl JournalDB for OverlayRecentDB { + fn spawn(&self) -> Box { + Box::new(self.clone()) + } + + fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + let overlay = self.journal_overlay.read().unwrap(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.journal.heap_size_of_children(); + mem + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + // record new commit's details. + trace!("commit: #{} ({}), end era: {:?}", now, id, end); + let mut journal_overlay = self.journal_overlay.write().unwrap(); + let batch = DBTransaction::new(); + { + let mut r = RlpStream::new_list(3); + let mut tx = self.transaction_overlay.drain(); + let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); + let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); + // Increase counter for each inserted key no matter if the block is canonical or not. 
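+ // Note: the record assembled below is an RLP list of three items, [ id, [ [key_0, value_0], ... ], [ removed_0, ... ] ], stored under the RLP-encoded key [ era, index, PADDING ], so sibling commits within one era occupy consecutive indices.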
+ let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }); + r.append(id); + r.begin_list(inserted_keys.len()); + for (k, v) in insertions { + r.begin_list(2); + r.append(&k); + r.append(&v); + journal_overlay.backing_overlay.emplace(k, v); + } + r.append(&removed_keys); + + let mut k = RlpStream::new_list(3); + let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len()); + k.append(&now); + k.append(&index); + k.append(&&PADDING[..]); + try!(batch.put(&k.drain(), r.as_raw())); + if journal_overlay.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.latest_era = Some(now); + } + journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); + } + + let journal_overlay = journal_overlay.deref_mut(); + // apply old commits' details + if let Some((end_era, canon_id)) = end { + if let Some(ref mut records) = journal_overlay.journal.get_mut(&end_era) { + let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new(); + let mut canon_deletions: Vec = Vec::new(); + let mut overlay_deletions: Vec = Vec::new(); + let mut index = 0usize; + for mut journal in records.drain(..) { + //delete the record from the db + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + try!(batch.delete(&r.drain())); + trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); + { + if canon_id == journal.id { + for h in &journal.insertions { + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if rc > 0 { + canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy + } + } + } + canon_deletions = journal.deletions; + } + overlay_deletions.append(&mut journal.insertions); + } + index += 1; + } + // apply canon inserts first + for (k, v) in canon_insertions { + try!(batch.put(&k, &v)); + } + // update the overlay + for k in overlay_deletions { + journal_overlay.backing_overlay.kill(&k); + } + // apply canon deletions + for k in canon_deletions { + if !journal_overlay.backing_overlay.exists(&k) { + try!(batch.delete(&k)); + } + } + journal_overlay.backing_overlay.purge(); + } + journal_overlay.journal.remove(&end_era); + } + try!(self.backing.write(batch)); + Ok(0) + } + +} + +impl HashDB for OverlayRecentDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.transaction_overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.transaction_overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + let v = self.journal_overlay.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec()); + match v { + Some(x) => { + Some(&self.transaction_overlay.denote(key, x).0) + } + _ => { + if let Some(x) = self.payload(key) { + Some(&self.transaction_overlay.denote(key, x).0) + } + else { + None + } + } + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.transaction_overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: 
Bytes) { + self.transaction_overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.transaction_overlay.kill(key); + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + use log::init_log; + use journaldb::JournalDB; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = OverlayRecentDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + 
assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = OverlayRecentDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + foo + }; + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + 
// expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + (foo, bar, baz) + }; + + { + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } + + #[test] + fn insert_older_era() { + let mut jdb = OverlayRecentDB::new_temp(); + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(0, &b"0b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + } +} diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs new file mode 100644 index 000000000..590964247 --- /dev/null +++ b/util/src/journaldb/refcounteddb.rs @@ -0,0 +1,285 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed, ref-counted JournalDB implementation. 
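+//! +//! Removals are journaled rather than applied immediately: when an era leaves the recent-history window, `commit()` replays the removes of the canonical record, or the inserts of its non-canonical siblings, against the ref-counted `OverlayDB` that backs this implementation.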
+ +use common::*; +use rlp::*; +use hashdb::*; +use overlaydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. +pub struct RefCountedDB { + forward: OverlayDB, + backing: Arc, + latest_era: Option, + inserts: Vec, + removes: Vec, +} + +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 512; +const PADDING : [u8; 10] = [ 0u8; 10 ]; + +impl RefCountedDB { + /// Create a new instance given a `backing` database. + pub fn new(path: &str) -> RefCountedDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let backing = Arc::new(backing); + let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); + + RefCountedDB { + forward: OverlayDB::new_with_arc(backing.clone()), + backing: backing, + inserts: vec![], + removes: vec![], + latest_era: latest_era, + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + fn new_temp() -> RefCountedDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } +} + +impl HashDB for RefCountedDB { + fn keys(&self) -> HashMap { self.forward.keys() } + fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) } + fn exists(&self, key: &H256) -> bool { self.forward.exists(key) } + fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } + fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } + fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } +} + +impl JournalDB for RefCountedDB { + fn spawn(&self) -> Box { + Box::new(RefCountedDB { + forward: self.forward.clone(), + backing: self.backing.clone(), + latest_era: self.latest_era, + inserts: self.inserts.clone(), + removes: self.removes.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.inserts.heap_size_of_children() + self.removes.heap_size_of_children() + } + + fn is_empty(&self) -> bool { + self.latest_era.is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store last_era, reclaim_period. + + // when we make a new commit, we journal the inserts and removes. 
+	// for each end_era that we journaled that we are now passing by, + // we remove all of its removes assuming it is canonical and all + // of its inserts otherwise. + + // record new commit's details. + let batch = DBTransaction::new(); + { + let mut index = 0usize; + let mut last; + + while try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&now); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })).is_some() { + index += 1; + } + + let mut r = RlpStream::new_list(3); + r.append(id); + r.append(&self.inserts); + r.append(&self.removes); + try!(batch.put(&last, r.as_raw())); + + trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes); + + self.inserts.clear(); + self.removes.clear(); + + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } + } + + // apply old commits' details + if let Some((end_era, canon_id)) = end { + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = { +// trace!(target: "rcdb", "checking for journal #{}.{}", end_era, index); + try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })) + } { + let rlp = Rlp::new(&rlp_data); + let our_id: H256 = rlp.val_at(0); + let to_remove: Vec<H256> = rlp.val_at(if canon_id == our_id {2} else {1}); + trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, index, our_id, canon_id, to_remove); + for i in &to_remove { + self.forward.remove(i); + } + try!(batch.delete(&last)); + index += 1; + } + } + + let r = try!(self.forward.commit_to_batch(&batch)); + try!(self.backing.write(batch)); + Ok(r) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use super::super::traits::JournalDB; + use hashdb::*; + + #[test] + fn long_history() { + // history is 3 + let mut jdb = RefCountedDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = RefCountedDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + 
fn fork() { + // history is 1 + let mut jdb = RefCountedDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } +} diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs new file mode 100644 index 000000000..017c24330 --- /dev/null +++ b/util/src/journaldb/traits.rs @@ -0,0 +1,42 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use hashdb::*; + +/// A HashDB which can manage a short-term journal potentially containing many forks of mutually +/// exclusive actions. +pub trait JournalDB : HashDB + Send + Sync { + /// Return a copy of ourself, in a box. + fn spawn(&self) -> Box; + + /// Returns heap memory size used + fn mem_used(&self) -> usize; + + /// Check if this database has any commits + fn is_empty(&self) -> bool; + + /// Commit all recent insert operations and canonical historical commits' removals from the + /// old era to the backing database, reverting any non-canonical historical commit's inserts. + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; + + /// State data query + fn state(&self, _id: &H256) -> Option { + None + } +} diff --git a/util/src/keys/directory.rs b/util/src/keys/directory.rs index d0d3393cd..a92bf4593 100644 --- a/util/src/keys/directory.rs +++ b/util/src/keys/directory.rs @@ -542,6 +542,8 @@ impl KeyDirectory { if removes.is_empty() { return; } let mut cache = self.cache.write().unwrap(); for key in removes { cache.remove(&key); } + + cache.shrink_to_fit(); } /// Reports how many keys are currently cached. 
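To make the `JournalDB` trait's commit semantics concrete before moving on, here is a minimal driver sketch. It is not part of the diff; the crate paths and the one-era history window are assumptions, mirroring the tests above:

```rust
extern crate ethcore_util as util;

use util::journaldb::{self, Algorithm, JournalDB};
use util::hashdb::HashDB;
use util::sha3::Hashable;

fn main() {
	// Pick an algorithm and open (or create) the backing database.
	let mut jdb: Box<JournalDB> = journaldb::new("/tmp/jdb-example", Algorithm::OverlayRecent);

	// Era 0: journal an insert under block id sha3("0").
	let key = jdb.insert(b"state node");
	jdb.commit(0, &b"0".sha3(), None).unwrap();

	// Era 1: journal a removal; era 0's sha3("0") is declared canonical.
	jdb.remove(&key);
	jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();

	// The removal only takes effect once the era that journaled it becomes
	// ancient, i.e. after era 1 is itself canonicalized here.
	jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
	assert!(!jdb.exists(&key));
}
```

As in the tests, `exists(&key)` still reports true between the second and third commits; latent removal is the whole point of the journal.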
diff --git a/util/src/keys/geth_import.rs b/util/src/keys/geth_import.rs index dbd9f0fe0..6c684c37d 100644 --- a/util/src/keys/geth_import.rs +++ b/util/src/keys/geth_import.rs @@ -161,6 +161,7 @@ mod tests { } #[test] + #[cfg(feature="heavy-tests")] fn can_decrypt_with_imported() { use keys::store::EncryptedHashMap; diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 625d6fd8f..78540bdb0 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -78,17 +78,97 @@ struct AccountUnlock { expires: DateTime, } +/// Basic account management trait +pub trait AccountProvider : Send + Sync { + /// Lists all accounts + fn accounts(&self) -> Result, ::std::io::Error>; + /// Unlocks account with the password provided + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError>; + /// Creates account + fn new_account(&self, pass: &str) -> Result; + /// Returns secret for unlocked account + fn account_secret(&self, account: &Address) -> Result; + /// Returns secret for unlocked account + fn sign(&self, account: &Address, message: &H256) -> Result; +} + +/// Thread-safe accounts management +pub struct AccountService { + secret_store: RwLock, +} + +impl AccountProvider for AccountService { + /// Lists all accounts + fn accounts(&self) -> Result, ::std::io::Error> { + Ok(try!(self.secret_store.read().unwrap().accounts()).iter().map(|&(addr, _)| addr).collect::>()) + } + /// Unlocks account with the password provided + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + self.secret_store.read().unwrap().unlock_account(account, pass) + } + /// Creates account + fn new_account(&self, pass: &str) -> Result { + self.secret_store.write().unwrap().new_account(pass) + } + /// Returns secret for unlocked account + fn account_secret(&self, account: &Address) -> Result { + self.secret_store.read().unwrap().account_secret(account) + } + /// Returns secret for unlocked account + fn sign(&self, account: &Address, message: &H256) -> Result { + self.secret_store.read().unwrap().sign(account, message) + } +} + +impl Default for AccountService { + fn default() -> Self { + AccountService::new() + } +} + +impl AccountService { + /// New account service with the default location + pub fn new() -> Self { + let secret_store = RwLock::new(SecretStore::new()); + secret_store.write().unwrap().try_import_existing(); + AccountService { + secret_store: secret_store + } + } + + #[cfg(test)] + fn new_test(temp: &::devtools::RandomTempPath) -> Self { + let secret_store = RwLock::new(SecretStore::new_test(temp)); + AccountService { + secret_store: secret_store + } + } + + /// Ticks the account service + pub fn tick(&self) { + self.secret_store.write().unwrap().collect_garbage(); + } +} + + +impl Default for SecretStore { + fn default() -> Self { + SecretStore::new() + } +} + impl SecretStore { /// new instance of Secret Store in default home directory - pub fn new() -> SecretStore { + pub fn new() -> Self { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); + ::std::fs::create_dir_all(&path).expect("Should panic since it is critical to be able to access home dir"); Self::new_in(&path) } /// new instance of Secret Store in specific directory - pub fn new_in(path: &Path) -> SecretStore { + pub fn new_in(path: &Path) -> Self { SecretStore { directory: KeyDirectory::new(path), unlocks: RwLock::new(HashMap::new()), @@ -160,12 +240,12 @@ impl SecretStore { /// Creates new account 
pub fn new_account(&mut self, pass: &str) -> Result { - let secret = H256::random(); + let key_pair = crypto::KeyPair::create().expect("Error creating key-pair. Something wrong with crypto libraries?"); + let address = Address::from(key_pair.public().sha3()); let key_id = H128::random(); - self.insert(key_id.clone(), secret, pass); + self.insert(key_id.clone(), key_pair.secret().clone(), pass); let mut key_file = self.directory.get(&key_id).expect("the key was just inserted"); - let address = Address::random(); key_file.account = Some(address); try!(self.directory.save(key_file)); Ok(address) @@ -190,6 +270,20 @@ impl SecretStore { let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); Ok(unlock.secret as crypto::Secret) } + + /// Makes account unlocks expire and removes unused key files from memory + pub fn collect_garbage(&mut self) { + let mut garbage_lock = self.unlocks.write().unwrap(); + self.directory.collect_garbage(); + let utc = UTC::now(); + let expired_addresses = garbage_lock.iter() + .filter(|&(_, unlock)| unlock.expires < utc) + .map(|(address, _)| address.clone()).collect::>(); + + for expired in expired_addresses { garbage_lock.remove(&expired); } + + garbage_lock.shrink_to_fit(); + } } fn derive_key_iterations(password: &str, salt: &H256, c: u32) -> (Bytes, Bytes) { @@ -296,12 +390,11 @@ impl EncryptedHashMap for SecretStore { } -#[cfg(test)] +#[cfg(all(test, feature="heavy-tests"))] mod vector_tests { use super::{derive_mac,derive_key_iterations}; use common::*; - #[test] fn mac_vector() { let password = "testpassword"; @@ -327,6 +420,8 @@ mod tests { use super::*; use devtools::*; use common::*; + use crypto::KeyPair; + use chrono::*; #[test] fn can_insert() { @@ -403,6 +498,7 @@ mod tests { } #[test] + #[cfg(feature="heavy-tests")] fn can_get() { let temp = RandomTempPath::create_dir(); let key_id = { @@ -501,4 +597,43 @@ mod tests { let accounts = sstore.accounts().unwrap(); assert_eq!(30, accounts.len()); } + + #[test] + fn validate_generated_addresses() { + let temp = RandomTempPath::create_dir(); + let mut sstore = SecretStore::new_test(&temp); + let addr = sstore.new_account("test").unwrap(); + sstore.unlock_account(&addr, "test").unwrap(); + let secret = sstore.account_secret(&addr).unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(Address::from(kp.public().sha3()), addr); + } + + #[test] + fn can_create_service() { + let temp = RandomTempPath::create_dir(); + let svc = AccountService::new_test(&temp); + assert!(svc.accounts().unwrap().is_empty()); + } + + #[test] + fn accounts_expire() { + use std::collections::hash_map::*; + + let temp = RandomTempPath::create_dir(); + let svc = AccountService::new_test(&temp); + let address = svc.new_account("pass").unwrap(); + svc.unlock_account(&address, "pass").unwrap(); + assert!(svc.account_secret(&address).is_ok()); + { + let ss_rw = svc.secret_store.write().unwrap(); + let mut ua_rw = ss_rw.unlocks.write().unwrap(); + let entry = ua_rw.entry(address); + if let Entry::Occupied(mut occupied) = entry { occupied.get_mut().expires = UTC::now() - Duration::minutes(1); } + } + + svc.tick(); + + assert!(svc.account_secret(&address).is_err()); + } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 43a9fc532..df5c2c448 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,6 +16,7 @@ //! Key-Value store abstraction with RocksDB backend. 
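Before continuing with kvdb, it is worth stepping back to the `AccountService` introduced in the store diff above and spelling out the intended call flow; a hedged sketch only, with module paths assumed and error handling elided:

```rust
extern crate ethcore_util as util;

use util::keys::store::{AccountService, AccountProvider};
use util::sha3::Hashable;

fn main() {
	// Creates the store under the default ~/.parity/keys location.
	let svc = AccountService::new();

	// Key generation now derives the address from the public key's sha3,
	// which is what the new validate_generated_addresses test asserts.
	let address = svc.new_account("hunter2").unwrap();
	svc.unlock_account(&address, "hunter2").unwrap();

	// Signing succeeds only while the unlock is live; a periodic tick()
	// garbage-collects expired unlocks and unused key files.
	let _signature = svc.sign(&address, &b"message".sha3()).unwrap();
	svc.tick();
}
```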
+use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator, IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction}; @@ -24,6 +25,12 @@ pub struct DBTransaction { batch: WriteBatch, } +impl Default for DBTransaction { + fn default() -> Self { + DBTransaction::new() + } +} + impl DBTransaction { /// Create new transaction. pub fn new() -> DBTransaction { @@ -55,8 +62,7 @@ pub struct DatabaseIterator<'a> { impl<'a> Iterator for DatabaseIterator<'a> { type Item = (Box<[u8]>, Box<[u8]>); - #[cfg_attr(feature="dev", allow(type_complexity))] - fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> { + fn next(&mut self) -> Option { self.iter.next() } } diff --git a/util/src/lib.rs b/util/src/lib.rs index 344da0980..cdc3a3f19 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -27,6 +27,8 @@ #![cfg_attr(feature="dev", allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(feature="dev", allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(feature="dev", allow(if_not_else))] //! Ethcore-util library //! @@ -154,7 +156,7 @@ pub use rlp::*; pub use hashdb::*; pub use memorydb::*; pub use overlaydb::*; -pub use journaldb::*; +pub use journaldb::JournalDB; pub use math::*; pub use crypto::*; pub use triehash::*; diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..bada4c4c6 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -24,6 +24,7 @@ use hashdb::*; use heapsize::*; use std::mem; use std::collections::HashMap; +use std::default::Default; #[derive(Debug,Clone)] /// Reference-counted memory-based HashDB implementation. @@ -32,7 +33,7 @@ use std::collections::HashMap; /// with `kill()`, check for existance with `exists()` and lookup a hash to derive /// the data with `lookup()`. Clear with `clear()` and purge the portions of the data /// that have no references with `purge()`. -/// +/// /// # Example /// ```rust /// extern crate ethcore_util; @@ -69,11 +70,18 @@ use std::collections::HashMap; /// assert!(!m.exists(&k)); /// } /// ``` +#[derive(PartialEq)] pub struct MemoryDB { data: HashMap, static_null_rlp: (Bytes, i32), } +impl Default for MemoryDB { + fn default() -> Self { + MemoryDB::new() + } +} + impl MemoryDB { /// Create a new instance of the memory DB. pub fn new() -> MemoryDB { @@ -133,7 +141,7 @@ impl MemoryDB { /// Denote than an existing value has the given key. Used when a key gets removed without /// a prior insert and thus has a negative reference with no value. - /// + /// /// May safely be called even if the key's value is known, in which case it will be a no-op. pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) { if self.raw(key) == None { diff --git a/util/src/misc.rs b/util/src/misc.rs index 39ccbf2da..8dcd25988 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -70,7 +70,7 @@ pub fn contents(name: &str) -> Result { /// Get the standard version string for this software. pub fn version() -> String { - format!("Parity/v{}-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version()) + format!("Parity/v{}-unstable-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version()) } /// Get the standard version data for this software. 
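For orientation, the amended `version()` now renders strings of the shape `Parity/v1.1.0-unstable-<shortsha>-<yyyymmdd>/<arch>-<os>-<env>/rustc<version>`; the placeholder values are illustrative and are filled in at build time from the Cargo version, the git metadata, and the target triple.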
@@ -85,4 +85,4 @@ pub fn version_data() -> Bytes { s.append(&format!("{}", rustc_version::version())); s.append(&&Target::os()[0..2]); s.out() -} \ No newline at end of file +} diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index fe65be6d1..a560c1a91 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -160,12 +160,12 @@ impl Connection { } } - /// Get socket token + /// Get socket token pub fn token(&self) -> StreamToken { self.token } - /// Replace socket token + /// Replace socket token pub fn set_token(&mut self, token: StreamToken) { self.token = token; } @@ -261,13 +261,13 @@ pub struct EncryptedConnection { } impl EncryptedConnection { - - /// Get socket token + + /// Get socket token pub fn token(&self) -> StreamToken { self.connection.token } - /// Replace socket token + /// Replace socket token pub fn set_token(&mut self, token: StreamToken) { self.connection.set_token(token); } @@ -513,8 +513,14 @@ mod tests { buf_size: usize, } + impl Default for TestSocket { + fn default() -> Self { + TestSocket::new() + } + } + impl TestSocket { - fn new() -> TestSocket { + fn new() -> Self { TestSocket { read_buffer: vec![], write_buffer: vec![], @@ -593,8 +599,14 @@ mod tests { type TestConnection = GenericConnection; + impl Default for TestConnection { + fn default() -> Self { + TestConnection::new() + } + } + impl TestConnection { - pub fn new() -> TestConnection { + pub fn new() -> Self { TestConnection { token: 999998888usize, socket: TestSocket::new(), @@ -609,8 +621,14 @@ mod tests { type TestBrokenConnection = GenericConnection; + impl Default for TestBrokenConnection { + fn default() -> Self { + TestBrokenConnection::new() + } + } + impl TestBrokenConnection { - pub fn new() -> TestBrokenConnection { + pub fn new() -> Self { TestBrokenConnection { token: 999998888usize, socket: TestBrokenSocket { error: "test broken socket".to_owned() }, diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index e52d5d25f..d755c58e7 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -18,7 +18,7 @@ use bytes::Bytes; use std::net::SocketAddr; use std::collections::{HashSet, HashMap, BTreeMap, VecDeque}; use std::mem; -use std::cmp; +use std::default::Default; use mio::*; use mio::udp::*; use sha3::*; @@ -62,8 +62,14 @@ struct NodeBucket { nodes: VecDeque, //sorted by last active } +impl Default for NodeBucket { + fn default() -> Self { + NodeBucket::new() + } +} + impl NodeBucket { - fn new() -> NodeBucket { + fn new() -> Self { NodeBucket { nodes: VecDeque::new() } @@ -113,14 +119,14 @@ impl Discovery { } /// Add a new node to discovery table. Pings the node. - pub fn add_node(&mut self, e: NodeEntry) { + pub fn add_node(&mut self, e: NodeEntry) { let endpoint = e.endpoint.clone(); self.update_node(e); self.ping(&endpoint); } /// Add a list of known nodes to the table. - pub fn init_node_list(&mut self, mut nodes: Vec) { + pub fn init_node_list(&mut self, mut nodes: Vec) { for n in nodes.drain(..) 
{ self.update_node(n); } @@ -251,7 +257,7 @@ impl Discovery { // Sort nodes by distance to target for bucket in buckets { for node in &bucket.nodes { - let distance = Discovery::distance(target, &node.address.id); + let distance = Discovery::distance(target, &node.address.id); found.entry(distance).or_insert_with(Vec::new).push(&node.address); if count == BUCKET_SIZE { // delete the most distant element @@ -291,7 +297,7 @@ impl Discovery { return; } Err(e) => { - warn!("UDP send error: {:?}, address: {:?}", e, &data.address); + debug!("UDP send error: {:?}, address: {:?}", e, &data.address); return; } } @@ -310,8 +316,8 @@ impl Discovery { None }), Ok(_) => None, - Err(e) => { - warn!("Error reading UPD socket: {:?}", e); + Err(e) => { + debug!("Error reading UDP socket: {:?}", e); None } } @@ -339,7 +345,7 @@ impl Discovery { PACKET_PONG => self.on_pong(&rlp, &node_id, &from), PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from), PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from), - _ => { + _ => { debug!("Unknown UDP packet: {}", packet_id); Ok(None) } @@ -367,14 +373,14 @@ impl Discovery { } else { self.update_node(entry.clone()); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); } let hash = rlp.as_raw().sha3(); let mut response = RlpStream::new_list(2); dest.to_rlp_list(&mut response); response.append(&hash); self.send_packet(PACKET_PONG, from, &response.drain()); - + Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() })) } @@ -391,7 +397,7 @@ impl Discovery { } self.clear_ping(node); let mut added_map = HashMap::new(); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); Ok(None) } @@ -400,27 +406,34 @@ impl Discovery { let target: NodeId = try!(rlp.val_at(0)); let timestamp: u64 = try!(rlp.val_at(1)); try!(self.check_timestamp(timestamp)); - let limit = (MAX_DATAGRAM_SIZE - 109) / 90; let nearest = Discovery::nearest_node_entries(&target, &self.node_buckets); if nearest.is_empty() { return Ok(None); } - let mut rlp = RlpStream::new_list(1); - rlp.begin_list(cmp::min(limit, nearest.len())); - for n in 0 .. nearest.len() { - rlp.begin_list(4); - nearest[n].endpoint.to_rlp(&mut rlp); - rlp.append(&nearest[n].id); - if (n + 1) % limit == 0 || n == nearest.len() - 1 { - self.send_packet(PACKET_NEIGHBOURS, &from, &rlp.drain()); - trace!(target: "discovery", "Sent {} Neighbours to {:?}", n, &from); - rlp = RlpStream::new_list(1); - rlp.begin_list(cmp::min(limit, nearest.len() - n)); - } + let mut packets = Discovery::prepare_neighbours_packets(&nearest); + for p in packets.drain(..) { + self.send_packet(PACKET_NEIGHBOURS, &from, &p); } + trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &from); Ok(None) } + fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec<Bytes> { + let limit = (MAX_DATAGRAM_SIZE - 109) / 90; // 13 entries per 1280-byte datagram, hence 77 packets for 1000 nodes in the test below + let chunks = nearest.chunks(limit); + let packets = chunks.map(|c| { + let mut rlp = RlpStream::new_list(1); + rlp.begin_list(c.len()); + for n in 0 .. 
c.len() { + rlp.begin_list(4); + c[n].endpoint.to_rlp(&mut rlp); + rlp.append(&c[n].id); + } + rlp.out() + }); + packets.collect() + } + fn on_neighbours(&mut self, rlp: &UntrustedRlp, _node: &NodeId, from: &SocketAddr) -> Result, NetworkError> { // TODO: validate packet let mut added = HashMap::new(); @@ -466,8 +479,8 @@ impl Discovery { pub fn round(&mut self) -> Option { let removed = self.check_expired(false); self.discover(); - if !removed.is_empty() { - Some(TableUpdates { added: HashMap::new(), removed: removed }) + if !removed.is_empty() { + Some(TableUpdates { added: HashMap::new(), removed: removed }) } else { None } } @@ -499,6 +512,24 @@ mod tests { use crypto::KeyPair; use std::str::FromStr; use rustc_serialize::hex::FromHex; + use rlp::*; + + #[test] + fn find_node() { + let mut nearest = Vec::new(); + let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7770").unwrap(); + for _ in 0..1000 { + nearest.push( NodeEntry { id: node.id.clone(), endpoint: node.endpoint.clone() }); + } + + let packets = Discovery::prepare_neighbours_packets(&nearest); + assert_eq!(packets.len(), 77); + for p in &packets[0..76] { + assert!(p.len() > 1280/2); + assert!(p.len() <= 1280); + } + assert!(packets.last().unwrap().len() > 0); + } #[test] fn discovery() { diff --git a/util/src/network/host.rs b/util/src/network/host.rs index ece24a1d1..02c576424 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -23,6 +23,7 @@ use std::ops::*; use std::cmp::min; use std::path::{Path, PathBuf}; use std::io::{Read, Write}; +use std::default::Default; use std::fs; use mio::*; use mio::tcp::*; @@ -75,9 +76,15 @@ pub struct NetworkConfiguration { pub ideal_peers: u32, } +impl Default for NetworkConfiguration { + fn default() -> Self { + NetworkConfiguration::new() + } +} + impl NetworkConfiguration { /// Create a new instance of default settings. 
- pub fn new() -> NetworkConfiguration { + pub fn new() -> Self { NetworkConfiguration { config_path: None, listen_address: None, @@ -534,7 +541,7 @@ impl Host where Message: Send + Sync + Clone { match TcpStream::connect(&address) { Ok(socket) => socket, Err(e) => { - warn!("Can't connect to address {:?}: {:?}", address, e); + debug!("Can't connect to address {:?}: {:?}", address, e); return; } } @@ -680,6 +687,8 @@ impl Host where Message: Send + Sync + Clone { if h.expired { return; } + io.deregister_stream(token).expect("Error deleting handshake registration"); + h.set_expired(); let originated = h.originated; let mut session = match Session::new(&mut h, &self.info.read().unwrap()) { Ok(s) => s, @@ -688,10 +697,16 @@ impl Host where Message: Send + Sync + Clone { return; } }; + if !originated { + let session_count = sessions.count(); + let ideal_peers = { self.info.read().unwrap().deref().config.ideal_peers }; + if session_count >= ideal_peers as usize { + session.disconnect(DisconnectReason::TooManyPeers); + return; + } + } let result = sessions.insert_with(move |session_token| { session.set_token(session_token); - io.deregister_stream(token).expect("Error deleting handshake registration"); - h.set_expired(); io.register_stream(session_token).expect("Error creating session registration"); self.stats.inc_sessions(); trace!(target: "network", "Creating session {} -> {}", token, session_token); diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 3c80f4148..b5dec75e2 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -26,7 +26,7 @@ use std::ops::*; use std::sync::*; use std::env; use std::collections::HashMap; -use kvdb::{Database}; +use kvdb::{Database, DBTransaction}; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay. /// @@ -36,6 +36,7 @@ use kvdb::{Database}; /// /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// queries have an immediate effect in terms of these functions. +#[derive(Clone)] pub struct OverlayDB { overlay: MemoryDB, backing: Arc, @@ -57,6 +58,36 @@ impl OverlayDB { Self::new(Database::open_default(dir.to_str().unwrap()).unwrap()) } + /// Commit all operations to given batch. + pub fn commit_to_batch(&mut self, batch: &DBTransaction) -> Result { + let mut ret = 0u32; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc != 0 { + match self.payload(&key) { + Some(x) => { + let (back_value, back_rc) = x; + let total_rc: i32 = back_rc as i32 + rc; + if total_rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); + } + deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0}; + } + None => { + if rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); + } + self.put_payload_in_batch(batch, &key, (value, rc as u32)); + } + }; + ret += 1; + } + } + trace!("OverlayDB::commit() deleted {} nodes", deletes); + Ok(ret) + } + /// Commit all memory operations to the backing database. /// /// Returns either an error or the number of items changed in the backing database. 
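The reference-count merge that `commit_to_batch` applies (and that the pre-existing `commit` in the next hunk shares) is easy to state in isolation. The following standalone sketch is illustrative only; `merge_rc` is a hypothetical helper, not a function in this diff:

```rust
/// Fold the overlay's signed delta `rc` into the backing store's unsigned
/// count. A negative total is the corruption case surfaced above as
/// `BaseDataError::NegativelyReferencedHash(key)`; a total of zero means the
/// payload is deleted from the backing database.
fn merge_rc(back_rc: u32, rc: i32) -> Result<Option<u32>, &'static str> {
	let total = back_rc as i32 + rc;
	if total < 0 {
		return Err("negatively referenced hash");
	}
	Ok(if total == 0 { None } else { Some(total as u32) })
}

fn main() {
	assert_eq!(merge_rc(1, -1), Ok(None));    // entry deleted from the backing DB
	assert_eq!(merge_rc(2, -1), Ok(Some(1))); // entry kept, count rewritten
	assert!(merge_rc(0, -1).is_err());        // error propagated to the caller
}
```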
diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs
index 3c80f4148..b5dec75e2 100644
--- a/util/src/overlaydb.rs
+++ b/util/src/overlaydb.rs
@@ -26,7 +26,7 @@ use std::ops::*;
 use std::sync::*;
 use std::env;
 use std::collections::HashMap;
-use kvdb::{Database};
+use kvdb::{Database, DBTransaction};
 
 /// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
 ///
@@ -36,6 +36,7 @@ use kvdb::{Database};
 ///
 /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()`
 /// queries have an immediate effect in terms of these functions.
+#[derive(Clone)]
 pub struct OverlayDB {
 	overlay: MemoryDB,
 	backing: Arc<Database>,
@@ -57,6 +58,36 @@ impl OverlayDB {
 		Self::new(Database::open_default(dir.to_str().unwrap()).unwrap())
 	}
 
+	/// Commit all operations to given batch.
+	pub fn commit_to_batch(&mut self, batch: &DBTransaction) -> Result<u32, UtilError> {
+		let mut ret = 0u32;
+		let mut deletes = 0usize;
+		for i in self.overlay.drain().into_iter() {
+			let (key, (value, rc)) = i;
+			if rc != 0 {
+				match self.payload(&key) {
+					Some(x) => {
+						let (back_value, back_rc) = x;
+						let total_rc: i32 = back_rc as i32 + rc;
+						if total_rc < 0 {
+							return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
+						}
+						deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0};
+					}
+					None => {
+						if rc < 0 {
+							return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
+						}
+						self.put_payload_in_batch(batch, &key, (value, rc as u32));
+					}
+				};
+				ret += 1;
+			}
+		}
+		trace!("OverlayDB::commit() deleted {} nodes", deletes);
+		Ok(ret)
+	}
+
 	/// Commit all memory operations to the backing database.
 	///
 	/// Returns either an error or the number of items changed in the backing database.
@@ -95,13 +126,13 @@ impl OverlayDB {
 					let (back_value, back_rc) = x;
 					let total_rc: i32 = back_rc as i32 + rc;
 					if total_rc < 0 {
-						return Err(From::from(BaseDataError::NegativelyReferencedHash));
+						return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
 					}
 					deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0};
 				}
 				None => {
 					if rc < 0 {
-						return Err(From::from(BaseDataError::NegativelyReferencedHash));
+						return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
 					}
 					self.put_payload(&key, (value, rc as u32));
 				}
@@ -136,6 +167,9 @@ impl OverlayDB {
 	/// ```
 	pub fn revert(&mut self) { self.overlay.clear(); }
 
+	/// Get the number of references that would be committed.
+	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(&key).map_or(0, |&(_, refs)| refs) }
+
 	/// Get the refs and value of the given key.
 	fn payload(&self, key: &H256) -> Option<(Bytes, u32)> {
 		self.backing.get(&key.bytes())
@@ -146,6 +180,20 @@ impl OverlayDB {
 		})
 	}
 
+	/// Put the refs and value of the given key, possibly deleting it from the db.
+	fn put_payload_in_batch(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool {
+		if payload.1 > 0 {
+			let mut s = RlpStream::new_list(2);
+			s.append(&payload.1);
+			s.append(&payload.0);
+			batch.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
+			false
+		} else {
+			batch.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?");
+			true
+		}
+	}
+
 	/// Put the refs and value of the given key, possibly deleting it from the db.
 	fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool {
 		if payload.1 > 0 {
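The new `commit_to_batch`/`put_payload_in_batch` pair lets a caller stage an entire overlay flush inside one `DBTransaction`, so the write either lands atomically or not at all; the reference-count merge is the same as in the existing `commit()`, including the `NegativelyReferencedHash(key)` error that now names the offending hash. A self-contained sketch of that merge rule, with a `HashMap` standing in for the real write batch (names and types are illustrative):

```rust
use std::collections::HashMap;

/// Stand-in for a write batch: Some((value, rc)) = put, None = delete.
type Batch = HashMap<Vec<u8>, Option<(Vec<u8>, u32)>>;

/// Merge one overlay entry (value, signed rc delta) with what the
/// backing store already holds, mirroring commit_to_batch's logic.
fn commit_entry(
    batch: &mut Batch,
    key: Vec<u8>,
    value: Vec<u8>,
    rc: i32,
    backing: Option<(Vec<u8>, u32)>,
) -> Result<(), String> {
    if rc == 0 {
        return Ok(()); // nothing net to do for this key
    }
    let (value, total_rc) = match backing {
        Some((back_value, back_rc)) => (back_value, back_rc as i32 + rc),
        None => (value, rc),
    };
    if total_rc < 0 {
        return Err(format!("negatively referenced hash: {:?}", key));
    }
    // A merged count of zero deletes the payload (put_payload_in_batch
    // returning true); anything positive (re)writes it.
    batch.insert(key, if total_rc > 0 { Some((value, total_rc as u32)) } else { None });
    Ok(())
}

fn main() {
    let mut batch = Batch::new();
    // Fresh insert: no backing entry, delta +1 -> put.
    commit_entry(&mut batch, b"a".to_vec(), b"A".to_vec(), 1, None).unwrap();
    // Balanced remove: backing rc 1, delta -1 -> delete.
    commit_entry(&mut batch, b"b".to_vec(), vec![], -1, Some((b"B".to_vec(), 1))).unwrap();
    // Over-remove: backing rc 1, delta -2 -> error.
    assert!(commit_entry(&mut batch, b"c".to_vec(), vec![], -2, Some((b"C".to_vec(), 1))).is_err());
    assert_eq!(batch[&b"a".to_vec()], Some((b"A".to_vec(), 1)));
    assert_eq!(batch[&b"b".to_vec()], None);
    println!("batch staged: {} ops", batch.len());
}
```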
diff --git a/util/src/panics.rs b/util/src/panics.rs
index 05d266b8b..980d4fc69 100644
--- a/util/src/panics.rs
+++ b/util/src/panics.rs
@@ -19,6 +19,7 @@
 use std::thread;
 use std::ops::DerefMut;
 use std::sync::{Arc, Mutex};
+use std::default::Default;
 
 /// Thread-safe closure for handling possible panics
 pub trait OnPanicListener: Send + Sync + 'static {
@@ -56,14 +57,20 @@ pub struct PanicHandler {
 	listeners: Mutex<Vec<Box<OnPanicListener>>>
 }
 
+impl Default for PanicHandler {
+	fn default() -> Self {
+		PanicHandler::new()
+	}
+}
+
 impl PanicHandler {
 	/// Creates new `PanicHandler` wrapped in `Arc`
-	pub fn new_in_arc() -> Arc<PanicHandler> {
+	pub fn new_in_arc() -> Arc<PanicHandler> {
 		Arc::new(Self::new())
 	}
 
 	/// Creates new `PanicHandler`
-	pub fn new() -> PanicHandler {
+	pub fn new() -> Self {
 		PanicHandler {
 			listeners: Mutex::new(vec![])
 		}
diff --git a/util/src/rlp/rlpin.rs b/util/src/rlp/rlpin.rs
index d58fa95e8..9d3fcb2fa 100644
--- a/util/src/rlp/rlpin.rs
+++ b/util/src/rlp/rlpin.rs
@@ -24,7 +24,7 @@ impl<'a> From<UntrustedRlp<'a>> for Rlp<'a> {
 }
 
 /// Data-oriented view onto trusted rlp-slice.
-/// 
+///
 /// Unlikely to `UntrustedRlp` doesn't bother you with error
 /// handling. It assumes that you know what you are doing.
 #[derive(Debug)]
@@ -44,7 +44,7 @@ impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view {
 	type Data = &'a [u8];
 	type Item = Rlp<'a>;
 	type Iter = RlpIterator<'a, 'view>;
-	
+
 	/// Create a new instance of `Rlp`
 	fn new(bytes: &'a [u8]) -> Rlp<'a> {
 		Rlp {
@@ -116,7 +116,7 @@ impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view {
 impl <'a, 'view> Rlp<'a> where 'a: 'view {
 	fn view_as_val<T, R>(r: &R) -> T where R: View<'a, 'view>, T: RlpDecodable {
 		let res: Result<T, DecoderError> = r.as_val();
-		res.unwrap_or_else(|_| panic!())
+		res.unwrap_or_else(|e| panic!("DecodeError: {}", e))
 	}
 
 	/// Decode into an object
diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs
index ba70e7b2b..7bf3d3cdd 100644
--- a/util/src/rlp/rlpstream.rs
+++ b/util/src/rlp/rlpstream.rs
@@ -15,6 +15,7 @@
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
 use std::ops::Deref;
+use std::default::Default;
 use elastic_array::*;
 use rlp::bytes::{ToBytes, VecLike};
 use rlp::{Stream, Encoder, Encodable};
@@ -44,6 +45,12 @@ pub struct RlpStream {
 	finished_list: bool,
 }
 
+impl Default for RlpStream {
+	fn default() -> Self {
+		RlpStream::new()
+	}
+}
+
 impl Stream for RlpStream {
 	fn new() -> Self {
 		RlpStream {
@@ -190,8 +197,14 @@ struct BasicEncoder {
 	bytes: ElasticArray1024<u8>,
 }
 
+impl Default for BasicEncoder {
+	fn default() -> Self {
+		BasicEncoder::new()
+	}
+}
+
 impl BasicEncoder {
-	fn new() -> BasicEncoder {
+	fn new() -> Self {
 		BasicEncoder { bytes: ElasticArray1024::new() }
 	}
 
@@ -222,7 +235,7 @@ impl Encoder for BasicEncoder {
 			// just 0
 			0 => self.bytes.push(0x80u8),
 			// byte is its own encoding if < 0x80
-			1 => { 
+			1 => {
 				value.to_bytes(&mut self.bytes);
 				let len = self.bytes.len();
 				let last_byte = self.bytes[len - 1];
diff --git a/util/src/table.rs b/util/src/table.rs
index e41209608..c3b2006cf 100644
--- a/util/src/table.rs
+++ b/util/src/table.rs
@@ -16,6 +16,7 @@
 
 //! A collection associating pair of keys (row and column) with a single value.
 
+use std::default::Default;
 use std::hash::Hash;
 use std::collections::HashMap;
 
@@ -30,11 +31,21 @@ pub struct Table<Row, Col, Val>
 	map: HashMap<Row, HashMap<Col, Val>>,
 }
 
+impl<Row, Col, Val> Default for Table<Row, Col, Val>
+	where Row: Eq + Hash + Clone,
+	      Col: Eq + Hash {
+	fn default() -> Self {
+		Table::new()
+	}
+}
+
+// There is default but clippy does not detect it?
+#[cfg_attr(feature="dev", allow(new_without_default))]
 impl<Row, Col, Val> Table<Row, Col, Val>
 	where Row: Eq + Hash + Clone,
 	      Col: Eq + Hash {
 	/// Creates new Table
-	pub fn new() -> Table<Row, Col, Val> {
+	pub fn new() -> Self {
 		Table {
 			map: HashMap::new(),
 		}
diff --git a/util/src/trie/journal.rs b/util/src/trie/journal.rs
index db16a313d..4ffd7cf5c 100644
--- a/util/src/trie/journal.rs
+++ b/util/src/trie/journal.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
+use std::default::Default;
 use sha3::*;
 use hash::H256;
 use bytes::*;
@@ -39,6 +40,12 @@ pub struct Score {
 #[derive(Debug)]
 pub struct Journal (Vec<JournalEntry>);
 
+impl Default for Journal {
+	fn default() -> Self {
+		Journal::new()
+	}
+}
+
 impl Journal {
 	/// Create a new, empty, object.
 	pub fn new() -> Journal { Journal(vec![]) }
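The `Default` implementations added across `panics.rs`, `rlpstream.rs`, `table.rs`, and `journal.rs` all follow one pattern: each type already has a conventional `new()`, and delegating `default()` to it satisfies clippy's `new_without_default` lint while keeping a single source of truth for the initial state (the `Table` impl shows the lint apparently missing the generic case, hence the `cfg_attr` escape hatch). The pattern reduced to a sketch, with a made-up `Registry` type standing in for any of the types above:

```rust
use std::collections::HashMap;

/// A type with a conventional constructor, like RlpStream or Table.
pub struct Registry {
    entries: HashMap<String, u32>,
}

// Delegating to new() means the initial state is defined exactly once.
impl Default for Registry {
    fn default() -> Self {
        Registry::new()
    }
}

impl Registry {
    /// Creates an empty Registry.
    pub fn new() -> Self {
        Registry { entries: HashMap::new() }
    }
}

fn main() {
    // Both construction paths now work, including in generic code
    // that bounds on Default.
    let a = Registry::new();
    let b: Registry = Default::default();
    assert_eq!(a.entries.len(), b.entries.len());
    println!("both constructors agree");
}
```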
diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs
index c4b5e120c..06076d273 100644
--- a/util/src/trie/triedb.rs
+++ b/util/src/trie/triedb.rs
@@ -22,7 +22,7 @@ use super::trietraits::*;
 use super::node::*;
 
 /// A `Trie` implementation using a generic `HashDB` backing database.
-/// 
+///
 /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
 /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
 /// which items in the backing database do not belong to this trie. If this is the only trie in the
@@ -63,16 +63,16 @@ impl<'db> TrieDB<'db> {
 			flushln!("TrieDB::new({}): Trie root not found!", root);
 			panic!("Trie root not found!");
 		}
-		TrieDB { 
-			db: db, 
+		TrieDB {
+			db: db,
 			root: root,
-			hash_count: 0 
+			hash_count: 0
 		}
 	}
 
 	/// Get the backing database.
-	pub fn db(&'db self) -> &'db HashDB { 
-		self.db 
+	pub fn db(&'db self) -> &'db HashDB {
+		self.db
 	}
 
 	/// Determine all the keys in the backing database that belong to the trie.
@@ -142,7 +142,7 @@ impl<'db> TrieDB<'db> {
 
 	/// Indentation helper for `formal_all`.
 	fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
-		for _ in 0..size { 
+		for _ in 0..size {
 			try!(write!(f, " "));
 		}
 		Ok(())
@@ -358,7 +358,7 @@ impl<'db> fmt::Debug for TrieDB<'db> {
 	fn iterator() {
 		use memorydb::*;
 		use super::triedbmut::*;
-		
+
 		let d = vec![ &b"A"[..], &b"AA"[..], &b"AB"[..], &b"B"[..] ];
 
 		let mut memdb = MemoryDB::new();
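Whitespace cleanups aside, the `iterator` test at the bottom of `triedb.rs` leans on a property worth spelling out: a Merkle-Patricia trie stores keys by shared nibble prefix, so iteration visits keys in lexicographic order regardless of insertion order. A quick stand-in demonstration with `BTreeMap`, which shares the ordered-traversal property (though nothing else) with the trie:

```rust
use std::collections::BTreeMap;

fn main() {
    // Same keys as the triedb iterator test, inserted out of order.
    let keys = vec![ &b"B"[..], &b"AB"[..], &b"A"[..], &b"AA"[..] ];
    let mut map = BTreeMap::new();
    for k in keys {
        map.insert(k.to_vec(), k.to_vec());
    }
    // Ordered traversal: A, AA, AB, B - prefix-sorted, like a trie walk.
    let walked: Vec<Vec<u8>> = map.keys().cloned().collect();
    assert_eq!(
        walked,
        vec![b"A".to_vec(), b"AA".to_vec(), b"AB".to_vec(), b"B".to_vec()]
    );
    println!("visited {} keys in prefix order", walked.len());
}
```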
diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs
index 829c1e518..3d75fa3e1 100644
--- a/util/src/trie/triedbmut.rs
+++ b/util/src/trie/triedbmut.rs
@@ -23,7 +23,7 @@ use super::journal::*;
 use super::trietraits::*;
 
 /// A `Trie` implementation using a generic `HashDB` backing database.
-/// 
+///
 /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
 /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
 /// which items in the backing database do not belong to this trie. If this is the only trie in the
@@ -71,16 +71,16 @@ impl<'db> TrieDBMut<'db> {
 	/// Create a new trie with the backing database `db` and empty `root`
 	/// Initialise to the state entailed by the genesis block.
 	/// This guarantees the trie is built correctly.
-	pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { 
+	pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
 		let mut r = TrieDBMut{
-			db: db, 
+			db: db,
 			root: root,
-			hash_count: 0 
-		}; 
+			hash_count: 0
+		};
 
 		// set root rlp
-		*r.root = SHA3_NULL_RLP.clone(); 
-		r 
+		*r.root = SHA3_NULL_RLP.clone();
+		r
 	}
 
 	/// Create a new trie with the backing database `db` and `root`.
@@ -91,21 +91,21 @@ impl<'db> TrieDBMut<'db> {
 			flushln!("Trie root not found {}", root);
 			panic!("Trie root not found!");
 		}
-		TrieDBMut { 
-			db: db, 
+		TrieDBMut {
+			db: db,
 			root: root,
-			hash_count: 0 
+			hash_count: 0
 		}
 	}
 
 	/// Get the backing database.
-	pub fn db(&'db self) -> &'db HashDB { 
-		self.db 
+	pub fn db(&'db self) -> &'db HashDB {
+		self.db
 	}
 
 	/// Get the backing database.
-	pub fn db_mut(&'db mut self) -> &'db mut HashDB { 
-		self.db 
+	pub fn db_mut(&'db mut self) -> &'db mut HashDB {
+		self.db
 	}
 
 	/// Determine all the keys in the backing database that belong to the trie.
@@ -184,7 +184,7 @@ impl<'db> TrieDBMut<'db> {
 
 	/// Indentation helper for `formal_all`.
 	fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
-		for _ in 0..size { 
+		for _ in 0..size {
 			try!(write!(f, " "));
 		}
 		Ok(())
@@ -378,7 +378,7 @@ impl<'db> TrieDBMut<'db> {
 			// original had empty slot - place a leaf there.
 			true if old_rlp.at(i).is_empty() => journal.new_node(Self::compose_leaf(&partial.mid(1), value), &mut s),
 			// original has something there already; augment.
-			true => { 
+			true => {
 				let new = self.augmented(self.take_node(&old_rlp.at(i), journal), &partial.mid(1), value, journal);
 				journal.new_node(new, &mut s);
 			}