diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 58036eb88..73afd1cfc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,9 +19,11 @@ linux-stable: script: - cargo build --release --verbose - strip target/release/parity + - md5sum target/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity.md5 --body parity.md5 tags: - rust - rust-stable @@ -40,9 +42,11 @@ linux-stable-14.04: script: - cargo build --release --verbose - strip target/release/parity + - md5sum target/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity.md5 --body parity.md5 tags: - rust - rust-14.04 @@ -101,9 +105,11 @@ linux-centos: - export CC="gcc" - cargo build --release --verbose - strip target/release/parity + - md5sum target/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity.md5 --body parity.md5 tags: - rust - rust-centos @@ -127,9 +133,11 @@ linux-armv7: - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity + - md5sum 
target/armv7-unknown-linux-gnueabihf/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity.md5 --body parity.md5 tags: - rust - rust-arm @@ -154,9 +162,11 @@ linux-arm: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity + - md5sum target/arm-unknown-linux-gnueabihf/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity.md5 --body parity.md5 tags: - rust - rust-arm @@ -181,9 +191,11 @@ linux-armv6: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release --verbose - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity + - md5sum target/arm-unknown-linux-gnueabi/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity.md5 --body parity.md5 tags: - rust - rust-arm @@ -208,9 +220,11 @@ linux-aarch64: - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release --verbose - 
aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity + - md5sum target/aarch64-unknown-linux-gnu/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity.md5 --body parity.md5 tags: - rust - rust-arm @@ -228,9 +242,11 @@ darwin: - stable script: - cargo build --release --verbose + - md5sum target/release/parity >> parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity.md5 --body parity.md5 tags: - osx artifacts: @@ -248,24 +264,49 @@ windows: - set INCLUDE=C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Include;C:\vs2015\VC\include;C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt - set LIB=C:\vs2015\VC\lib;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64 - set RUST_BACKTRACE=1 + - set RUSTFLAGS=%RUSTFLAGS% -Zorbit=off -D warnings - rustup default stable-x86_64-pc-windows-msvc - cargo build --release --verbose + - curl -sL --url "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -o nsis\SimpleFC.dll + - curl -sL --url "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -o nsis\vc_redist.x64.exe + - signtool sign /f %keyfile% /p %certpass% target\release\parity.exe + - cd nsis + - makensis.exe installer.nsi + - cp installer.exe InstallParity.exe + - signtool sign /f %keyfile% /p %certpass% InstallParity.exe + - md5sums InstallParity.exe > InstallParity.exe.md5 + - zip 
win-installer.zip InstallParity.exe InstallParity.exe.md5 + - md5sums win-installer.zip > win-installer.zip.md5 + - cd ..\target\release\ + - md5sums parity.exe parity.pdb > parity.md5 + - md5sums parity.exe > parity.exe.md5 + - zip parity.zip parity.exe parity.pdb parity.md5 + - md5sums parity.zip > parity.zip.md5 + - cd ..\.. - aws configure set aws_access_key_id %s3_key% - aws configure set aws_secret_access_key %s3_secret% - - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe - - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.exe --body target\release\parity.exe + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.exe.md5 --body target\release\parity.exe.md5 + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.zip --body target\release\parity.zip + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.zip.md5 --body target\release\parity.zip.md5 + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/InstallParity.exe --body nsis\InstallParity.exe + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/InstallParity.exe.md5 --body nsis\InstallParity.exe.md5 + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/win-installer.zip --body nsis\win-installer.zip + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/win-installer.zip.md5 --body nsis\win-installer.zip.md5 tags: - rust-windows artifacts: paths: - target/release/parity.exe - target/release/parity.pdb + - nsis/installer.exe 
name: "x86_64-pc-windows-msvc_parity" test-linux: stage: test before_script: - git submodule update --init --recursive script: + - export RUST_BACKTRACE=1 - ./test.sh --verbose tags: - rust-test diff --git a/.travis.yml b/.travis.yml index 6428ccecf..d9cda5715 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,6 +31,7 @@ env: - RUN_COVERAGE="false" - RUN_DOCS="false" - TEST_OPTIONS="" + - RUSTFLAGS="-D warnings" # GH_TOKEN for documentation - secure: bumJASbZSU8bxJ0EyPUJmu16AiV9EXOpyOj86Jlq/Ty9CfwGqsSXt96uDyE+OUJf34RUFQMsw0nk37/zC4lcn6kqk2wpuH3N/o85Zo/cVZY/NusBWLQqtT5VbYWsV+u2Ua4Tmmsw8yVYQhYwU2ZOejNpflL+Cs9XGgORp1L+/gMRMC2y5Se6ZhwnKPQlRJ8LGsG1dzjQULxzADIt3/zuspNBS8a2urJwlHfGMkvHDoUWCviP/GXoSqw3TZR7FmKyxE19I8n9+iSvm9+oZZquvcgfUxMHn8Gq/b44UbPvjtFOg2yam4xdWXF/RyWCHdc/R9EHorSABeCbefIsm+zcUF3/YQxwpSxM4IZEeH2rTiC7dcrsKw3XsO16xFQz5YI5Bay+CT/wTdMmJd7DdYz7Dyf+pOvcM9WOf/zorxYWSBOMYy0uzbusU2iyIghQ82s7E/Ahg+WARtPgkuTLSB5aL1oCTBKHqQscMr7lo5Ti6RpWLxEdTQMBznc+bMr+6dEtkEcG9zqc6cE9XX+ox3wTU6+HVMfQ1ltCntJ4UKcw3A6INEbw9wgocQa812CIASQ2fE+SCAbz6JxBjIAlFUnD1lUB7S8PdMPwn9plfQgKQ2A5YZqg6FnBdf0rQXIJYxQWKHXj/rBHSUCT0tHACDlzTA+EwWggvkP5AGIxRxm8jhw= - KCOV_CMD="./kcov-master/tmp/usr/local/bin/kcov" diff --git a/Cargo.lock b/Cargo.lock index 52bdee494..a092325ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -120,6 +120,11 @@ name = "bloomchain" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "byteorder" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bytes" version = "0.3.0" @@ -235,7 +240,7 @@ version = "0.5.4" source = "git+https://github.com/ethcore/rust-secp256k1#a9a0b1be1f39560ca86e8fc8e55e205a753ff25c" dependencies = [ "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.15 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -270,10 +275,12 @@ version = "1.4.0" dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", + "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", "ethcore-ipc 1.4.0", @@ -288,6 +295,7 @@ dependencies = [ "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -300,7 +308,7 @@ dependencies = [ [[package]] name = "ethcore-bigint" -version = "0.1.0" +version = "0.1.1" dependencies = [ "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -308,6 +316,10 @@ dependencies = [ "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethcore-bloom-journal" +version = "0.1.0" + [[package]] name = "ethcore-dapps" version = "1.4.0" @@ -317,7 +329,7 @@ dependencies = [ "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", - "https-fetch 
0.1.0", + "fetch 0.1.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", @@ -387,6 +399,7 @@ dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -466,6 +479,7 @@ dependencies = [ "ethkey 0.2.0", "ethstore 0.1.0", "ethsync 1.4.0", + "fetch 0.1.0", "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", @@ -525,7 +539,8 @@ dependencies = [ "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", - "ethcore-bigint 0.1.0", + "ethcore-bigint 0.1.1", + "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -553,7 +568,7 @@ name = "ethcrypto" version = "0.1.0" dependencies = [ "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", - "ethcore-bigint 0.1.0", + "ethcore-bigint 0.1.1", "ethkey 0.2.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -576,7 +591,7 @@ version = "0.2.0" dependencies = [ "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 
(git+https://github.com/ethcore/rust-secp256k1)", - "ethcore-bigint 0.1.0", + "ethcore-bigint 0.1.1", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -639,6 +654,16 @@ dependencies = [ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "fetch" +version = "0.1.0" +dependencies = [ + "https-fetch 0.1.0", + "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "flate2" version = "0.2.14" @@ -650,8 +675,11 @@ dependencies = [ [[package]] name = "gcc" -version = "0.3.28" +version = "0.3.35" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "glob" @@ -690,7 +718,7 @@ version = "0.1.0" dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", - "rustls 0.1.1 (git+https://github.com/ctz/rustls)", + "rustls 0.1.2 (git+https://github.com/ctz/rustls)", ] [[package]] @@ -852,6 +880,11 @@ name = "libc" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "linked-hash-map" +version = "0.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "linked-hash-map" version = "0.3.0" @@ -862,6 +895,14 @@ name = "log" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "lru-cache" +version = "0.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "linked-hash-map 0.0.9 
(registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "matches" version = "0.1.2" @@ -899,7 +940,7 @@ name = "miniz-sys" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -985,7 +1026,7 @@ name = "nanomsg-sys" version = "0.5.0" source = "git+https://github.com/ethcore/nanomsg.rs.git#c40fe442c9afaea5b38009a3d992ca044dcceb00" dependencies = [ - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1345,7 +1386,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ring" -version = "0.3.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1357,7 +1398,7 @@ name = "rlp" version = "0.1.0" dependencies = [ "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-bigint 0.1.0", + "ethcore-bigint 0.1.1", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1365,7 +1406,7 @@ dependencies = [ [[package]] name = "rocksdb" version = "0.4.5" -source = "git+https://github.com/ethcore/rust-rocksdb#485dd747a2c9a9f910fc8ac696fc9edf5fa22aa3" +source = "git+https://github.com/ethcore/rust-rocksdb#ffc7c82380fe8569f85ae6743f7f620af2d4a679" dependencies = [ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", "rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)", @@ -1374,9 +1415,9 @@ dependencies 
= [ [[package]] name = "rocksdb-sys" version = "0.3.0" -source = "git+https://github.com/ethcore/rust-rocksdb#485dd747a2c9a9f910fc8ac696fc9edf5fa22aa3" +source = "git+https://github.com/ethcore/rust-rocksdb#ffc7c82380fe8569f85ae6743f7f620af2d4a679" dependencies = [ - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1407,7 +1448,7 @@ name = "rust-crypto" version = "0.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1429,15 +1470,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.1.1" -source = "git+https://github.com/ctz/rustls#a9c5a79f49337e22ac05bb1ea114240bdbe0fdd2" +version = "0.1.2" +source = "git+https://github.com/ctz/rustls#3d2db624997004b7b18ba4463d6081f37598b2f5" dependencies = [ "base64 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1500,7 +1541,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "sha3" version = 
"0.1.0" dependencies = [ - "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1788,10 +1829,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "webpki" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1871,6 +1912,7 @@ dependencies = [ "checksum bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f67931368edf3a9a51d29886d245f1c3db2f1ef0dcc9e35ff70341b78c10d23" "checksum blastfig 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09640e0509d97d5cdff03a9f5daf087a8e04c735c3b113a75139634a19cfc7b2" "checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d" +"checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" @@ -1888,7 +1930,7 @@ dependencies = [ "checksum eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)" = "" "checksum ethabi 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "f7b0c53453517f620847be51943db329276ae52f2e210cfc659e81182864be2f" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" -"checksum gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)" = "3da3a2cbaeb01363c8e3704fd9fd0eb2ceb17c6f27abd4c1ef040fb57d20dc79" +"checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1" "checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c" @@ -1903,14 +1945,16 @@ dependencies = [ "checksum itoa 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae3088ea4baeceb0284ee9eea42f591226e6beaecf65373e41b38d95a1b8e7a1" "checksum json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)" = "" "checksum json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)" = "" -"checksum jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e913b3c809aab9378889da8b990b4a46b98bd4794c8117946a1cf63c5f87bcde" +"checksum jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3c5094610b07f28f3edaf3947b732dadb31dbba4941d4d0c1c7a8350208f4414" "checksum jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" "checksum libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "23e3757828fa702a20072c37ff47938e9dd331b92fac6e223d26d4b7a55f7ee2" +"checksum linked-hash-map 0.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "83f7ff3baae999fdf921cccf54b61842bb3b26868d50d02dff48052ebec8dd79" "checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" "checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054" +"checksum lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "42d50dcb5d9f145df83b1043207e1ac0c37c9c779c4e128ca4655abc3f3cbf8c" "checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e" "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2" @@ -1964,7 +2008,7 @@ dependencies = [ "checksum rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "655df67c314c30fa3055a365eae276eb88aa4f3413a352a1ab32c1320eda41ea" "checksum regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)" = "b4329b8928a284580a1c63ec9d846b12f6d3472317243ff7077aff11f23f2b29" "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9" -"checksum ring 
0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d059a6a96d3be79042e3f70eb97945912839265f9d8ab45b921abaf266c70dbb" +"checksum ring 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0d2f6547bf9640f1d3cc4e771f82374ec8fd237c17eeb3ff5cd5ccbe22377a09" "checksum rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)" = "" "checksum rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)" = "" "checksum rotor 0.6.3 (git+https://github.com/ethcore/rotor)" = "" @@ -1972,7 +2016,7 @@ dependencies = [ "checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a" "checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b" "checksum rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c5f5376ea5e30ce23c03eb77cbe4962b988deead10910c372b226388b594c084" -"checksum rustls 0.1.1 (git+https://github.com/ctz/rustls)" = "" +"checksum rustls 0.1.2 (git+https://github.com/ctz/rustls)" = "" "checksum semver 0.1.20 (registry+https://github.com/rust-lang/crates.io-index)" = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac" "checksum semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2d5b7638a1f03815d94e88cb3b3c08e87f0db4d683ef499d1836aaf70a45623f" "checksum serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b1dfda9ebb31d29fa8b94d7eb3031a86a8dcec065f0fe268a30f98867bf45775" @@ -2017,7 +2061,7 @@ dependencies = [ "checksum vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0795a11576d29ae80525a3fda315bf7b534f8feb9d34101e5fe63fb95bb2fd24" "checksum vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56b639f935488eb40f06d17c3e3bcc3054f6f75d264e187b1107c8d1cba8d31c" "checksum void 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -"checksum webpki 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5dc10a815fabbb0c3145c1153240528f3a8703a47e26e8dbb4a5d4f6386200ad" +"checksum webpki 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "813503a5985585e0812d430cd1328ee322f47f66629c8ed4ecab939cf9e92f91" "checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum ws 0.5.2 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)" = "" diff --git a/Cargo.toml b/Cargo.toml index 84edb6c1e..38003e7a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ default = ["ui", "use-precompiled-js", "ipc"] ui = ["dapps", "ethcore-signer/ui"] use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"] dapps = ["ethcore-dapps"] -ipc = ["ethcore/ipc"] +ipc = ["ethcore/ipc", "ethsync/ipc"] jit = ["ethcore/jit"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] @@ -70,12 +70,14 @@ stratum = ["ipc"] ethkey-cli = ["ethcore/ethkey-cli"] ethstore-cli = ["ethcore/ethstore-cli"] evm-debug = ["ethcore/evm-debug"] +evm-debug-tests = ["ethcore/evm-debug-tests"] +slow-blocks = ["ethcore/slow-blocks"] [[bin]] path = "parity/main.rs" name = "parity" [profile.release] -debug = true +debug = false lto = false diff --git a/README.md b/README.md index d5fb5f044..a6c987a69 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ [Internal Documentation][doc-url] + Be sure to check out [our wiki][wiki-url] for more information. 
[travis-image]: https://travis-ci.org/ethcore/parity.svg?branch=master @@ -18,8 +19,11 @@ Be sure to check out [our wiki][wiki-url] for more information. [doc-url]: https://ethcore.github.io/parity/ethcore/index.html [wiki-url]: https://github.com/ethcore/parity/wiki +**Requires Rust version 1.12.0 to build** + ---- + ## About Parity Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and @@ -96,9 +100,9 @@ and Parity will begin syncing the Ethereum blockchain. ### Using systemd service file To start Parity as a regular user using systemd init: -1. Copy ```parity/scripts/parity.service``` to your -systemd user directory (usually ```~/.config/systemd/user```). -2. To pass any argument to Parity, write a ```~/.parity/parity.conf``` file this way: -```ARGS="ARG1 ARG2 ARG3"```. +1. Copy `parity/scripts/parity.service` to your +systemd user directory (usually `~/.config/systemd/user`). +2. To pass any argument to Parity, write a `~/.parity/parity.conf` file this way: +`ARGS="ARG1 ARG2 ARG3"`. - Example: ```ARGS="ui --geth --identity MyMachine"```. + Example: `ARGS="ui --geth --identity MyMachine"`. 
diff --git a/appveyor.yml b/appveyor.yml index 3ffaa961e..75a2da7cb 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,6 +6,7 @@ environment: certpass: secure: 0BgXJqxq9Ei34/hZ7121FQ== keyfile: C:\users\appveyor\Certificates.p12 + RUSTFLAGS: -Zorbit=off -D warnings branches: only: @@ -18,10 +19,10 @@ branches: install: - git submodule update --init --recursive - ps: Install-Product node 6 - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.10.0-x86_64-pc-windows-msvc.exe" + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.12.0-x86_64-pc-windows-msvc.exe" - ps: Start-FileDownload "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -FileName nsis\SimpleFC.dll - ps: Start-FileDownload "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -FileName nsis\vc_redist.x64.exe - - rust-1.10.0-x86_64-pc-windows-msvc.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - rust-1.12.0-x86_64-pc-windows-msvc.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin;C:\Program Files (x86)\NSIS;C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Bin - rustc -V - cargo -V diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index b1883e748..1019e5460 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -26,7 +26,7 @@ linked-hash-map = "0.3" ethcore-devtools = { path = "../devtools" } ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } -https-fetch = { path = "../util/https-fetch" } +fetch = { path = "../util/fetch" } parity-dapps = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } # List of apps parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 9a8dfef95..80c5e09de 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -92,11 +92,14 @@ impl server::Handler for RestApiRouter { } let url = url.expect("Check for 
None early-exists above; qed"); - let path = self.path.take().expect("on_request called only once, and path is always defined in new; qed"); + let mut path = self.path.take().expect("on_request called only once, and path is always defined in new; qed"); let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed"); let endpoint = url.path.get(1).map(|v| v.as_str()); let hash = url.path.get(2).map(|v| v.as_str()); + // at this point path.app_id contains 'api', adjust it to the hash properly, otherwise + // we will try and retrieve 'api' as the hash when doing the /api/content route + if let Some(hash) = hash.clone() { path.app_id = hash.to_owned() } let handler = endpoint.and_then(|v| match v { "apps" => Some(as_json(&self.api.list_apps())), diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 8702e4706..2e1328858 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -122,7 +122,7 @@ impl ContentFetcher { }, // We need to start fetching app None => { - trace!(target: "dapps", "Content unavailable. Fetching..."); + trace!(target: "dapps", "Content unavailable. Fetching... {:?}", content_id); let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true."); let content = self.resolver.resolve(content_hex); @@ -415,4 +415,3 @@ mod tests { assert_eq!(fetcher.contains("test3"), false); } } - diff --git a/dapps/src/handlers/client/mod.rs b/dapps/src/handlers/client/mod.rs deleted file mode 100644 index 3d8551e8a..000000000 --- a/dapps/src/handlers/client/mod.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Hyper Client Handlers - -pub mod fetch_file; - -use std::env; -use std::sync::{mpsc, Arc}; -use std::sync::atomic::AtomicBool; -use std::path::PathBuf; - -use hyper; -use https_fetch as https; - -use random_filename; -use self::fetch_file::{Fetch, Error as HttpFetchError}; - -pub type FetchResult = Result; - -#[derive(Debug)] -pub enum FetchError { - InvalidUrl, - Http(HttpFetchError), - Https(https::FetchError), - Other(String), -} - -impl From for FetchError { - fn from(e: HttpFetchError) -> Self { - FetchError::Http(e) - } -} - -pub struct Client { - http_client: hyper::Client, - https_client: https::Client, -} - -impl Client { - pub fn new() -> Self { - Client { - http_client: hyper::Client::new().expect("Unable to initialize http client."), - https_client: https::Client::new().expect("Unable to initialize https client."), - } - } - - pub fn close(self) { - self.http_client.close(); - self.https_client.close(); - } - - pub fn request(&mut self, url: &str, abort: Arc, on_done: Box) -> Result, FetchError> { - let is_https = url.starts_with("https://"); - let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl)); - trace!(target: "dapps", "Fetching from: {:?}", url); - if is_https { - let url = try!(Self::convert_url(url)); - - let (tx, rx) = mpsc::channel(); - let temp_path = Self::temp_path(); - let res = self.https_client.fetch_to_file(url, temp_path.clone(), abort, move |result| { - let res = tx.send( - result.map(|_| temp_path).map_err(FetchError::Https) - ); - if let Err(_) = res { - warn!("Fetch finished, but no one was listening"); - } - on_done(); - }); - - match res { - Ok(_) => Ok(rx), - 
Err(e) => Err(FetchError::Other(format!("{:?}", e))), - } - } else { - let (tx, rx) = mpsc::channel(); - let res = self.http_client.request(url, Fetch::new(tx, abort, on_done)); - - match res { - Ok(_) => Ok(rx), - Err(e) => Err(FetchError::Other(format!("{:?}", e))), - } - } - } - - fn convert_url(url: hyper::Url) -> Result { - let host = format!("{}", try!(url.host().ok_or(FetchError::InvalidUrl))); - let port = try!(url.port_or_known_default().ok_or(FetchError::InvalidUrl)); - https::Url::new(&host, port, url.path()).map_err(|_| FetchError::InvalidUrl) - } - - fn temp_path() -> PathBuf { - let mut dir = env::temp_dir(); - dir.push(random_filename()); - dir - } -} - - diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index c463d3710..639fc7497 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -22,13 +22,13 @@ use std::sync::{mpsc, Arc}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Instant, Duration}; use util::Mutex; +use fetch::{Client, Fetch, FetchResult}; use hyper::{server, Decoder, Encoder, Next, Method, Control}; use hyper::net::HttpStream; use hyper::status::StatusCode; use handlers::{ContentHandler, Redirection}; -use handlers::client::{Client, FetchResult}; use apps::redirection_address; use page::LocalPageEndpoint; @@ -159,7 +159,7 @@ impl ContentFetcherHandler { handler: H) -> (Self, Arc) { let fetch_control = Arc::new(FetchControl::default()); - let client = Client::new(); + let client = Client::default(); let handler = ContentFetcherHandler { fetch_control: fetch_control.clone(), control: Some(control), diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index 62b13eaa8..54644fe8d 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -21,7 +21,6 @@ mod echo; mod content; mod redirect; mod fetch; -pub mod client; pub use self::auth::AuthRequiredHandler; pub use self::echo::EchoHandler; diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 
edc0bebe5..cac42f893 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -58,10 +58,10 @@ extern crate jsonrpc_http_server; extern crate mime_guess; extern crate rustc_serialize; extern crate parity_dapps; -extern crate https_fetch; extern crate ethcore_rpc; extern crate ethcore_util as util; extern crate linked_hash_map; +extern crate fetch; #[cfg(test)] extern crate ethcore_devtools as devtools; diff --git a/db/src/database.rs b/db/src/database.rs index 9a52822f6..e1774159b 100644 --- a/db/src/database.rs +++ b/db/src/database.rs @@ -157,7 +157,7 @@ impl Drop for Database { } } -#[derive(Ipc)] +#[ipc] impl DatabaseService for Database { fn open(&self, config: DatabaseConfig, path: String) -> Result<(), Error> { let mut db = self.db.write(); diff --git a/ethash/src/compute.rs b/ethash/src/compute.rs index a99a0e3b5..6fcf17cf1 100644 --- a/ethash/src/compute.rs +++ b/ethash/src/compute.rs @@ -202,6 +202,9 @@ impl SeedHashCompute { } } +pub fn slow_get_seedhash(block_number: u64) -> H256 { + SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH) +} #[inline] fn fnv_hash(x: u32, y: u32) -> u32 { diff --git a/ethash/src/lib.rs b/ethash/src/lib.rs index 8fb2c43f6..130882318 100644 --- a/ethash/src/lib.rs +++ b/ethash/src/lib.rs @@ -26,7 +26,7 @@ mod compute; use std::mem; use compute::Light; -pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty}; +pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty, slow_get_seedhash}; use std::sync::Arc; use parking_lot::Mutex; diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 3ad9e69c4..7bda7e567 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -37,6 +37,9 @@ ethkey = { path = "../ethkey" } ethcore-ipc-nano = { path = "../ipc/nano" } rlp = { path = "../util/rlp" } rand = "0.3" +lru-cache = "0.0.7" +ethcore-bloom-journal = { path = "../util/bloom" } +byteorder = "0.5" [dependencies.hyper] 
git = "https://github.com/ethcore/hyper" @@ -44,7 +47,9 @@ default-features = false [features] jit = ["evmjit"] -evm-debug = [] +evm-debug = ["slow-blocks"] +evm-debug-tests = ["evm-debug"] +slow-blocks = [] # Use SLOW_TX_DURATION="50" (compile time!) to track transactions over 50ms json-tests = [] test-heavy = [] dev = ["clippy"] diff --git a/ethcore/build.rs b/ethcore/build.rs index b83955708..5a3a3f0ba 100644 --- a/ethcore/build.rs +++ b/ethcore/build.rs @@ -18,7 +18,7 @@ extern crate ethcore_ipc_codegen; fn main() { ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap(); - ethcore_ipc_codegen::derive_ipc("src/client/traits.rs").unwrap(); - ethcore_ipc_codegen::derive_ipc("src/snapshot/snapshot_service_trait.rs").unwrap(); - ethcore_ipc_codegen::derive_ipc("src/client/chain_notify.rs").unwrap(); + ethcore_ipc_codegen::derive_ipc_cond("src/client/traits.rs", cfg!(feature="ipc")).unwrap(); + ethcore_ipc_codegen::derive_ipc_cond("src/snapshot/snapshot_service_trait.rs", cfg!(feature="ipc")).unwrap(); + ethcore_ipc_codegen::derive_ipc_cond("src/client/chain_notify.rs", cfg!(feature="ipc")).unwrap(); } diff --git a/ethcore/res/ethereum/expanse.json b/ethcore/res/ethereum/expanse.json new file mode 100644 index 000000000..9b005096b --- /dev/null +++ b/ethcore/res/ethereum/expanse.json @@ -0,0 +1,69 @@ +{ + "name": "Expanse", + "forkName": "expanse", + "engine": { + "Ethash": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "minimumDifficulty": "0x020000", + "difficultyBoundDivisor": "0x0800", + "difficultyIncrementDivisor": "60", + "durationLimit": "0x3C", + "blockReward": "0x6f05b59d3b200000", + "registrar" : "0x6c221ca53705f3497ec90ca7b84c59ae7382fc21", + "frontierCompatibilityModeLimit": "0x30d40", + "difficultyHardforkTransition": "0x59d9", + "difficultyHardforkBoundDivisor": "0x0200", + "bombDefuseTransition": "0x30d40" + } + } + }, + "params": { + "accountStartNonce": "0x00", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + 
"networkID": "0x1", + "subprotocolName": "exp" + }, + "genesis": { + "seal": { + "ethereum": { + "nonce": "0x214652414e4b4f21", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x40000000", + "author": "0x93decab0cd745598860f782ac1e8f046cb99e898", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x4672616e6b6f497346726565646f6d", + "gasLimit": "0x1388" + }, + "nodes": [ + "enode://7f335a047654f3e70d6f91312a7cf89c39704011f1a584e2698250db3d63817e74b88e26b7854111e16b2c9d0c7173c05419aeee2d0321850227b126d8b1be3f@46.101.156.249:42786", + "enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786", + "enode://96d3919b903e7f5ad59ac2f73c43be9172d9d27e2771355db03fd194732b795829a31fe2ea6de109d0804786c39a807e155f065b4b94c6fce167becd0ac02383@45.55.22.34:42786", + "enode://5f6c625bf287e3c08aad568de42d868781e961cbda805c8397cfb7be97e229419bef9a5a25a75f97632787106bba8a7caf9060fab3887ad2cfbeb182ab0f433f@46.101.182.53:42786", + "enode://d33a8d4c2c38a08971ed975b750f21d54c927c0bf7415931e214465a8d01651ecffe4401e1db913f398383381413c78105656d665d83f385244ab302d6138414@128.199.183.48:42786", + "enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786", + "enode://f6f0d6b9b7d02ec9e8e4a16e38675f3621ea5e69860c739a65c1597ca28aefb3cec7a6d84e471ac927d42a1b64c1cbdefad75e7ce8872d57548ddcece20afdd1@159.203.64.95:42786" + ], + "accounts": { + "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "builtin": 
{ "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "bb94f0ceb32257275b2a7a9c094c13e469b4563e": { + "balance": "10000000000000000000000000" + }, + "15656715068ab0dbdf0ab00748a8a19e40f28192": { + "balance": "1000000000000000000000000" + }, + "c075fa11f85bda3aaba67106226aaf086ac16f4e": { + "balance": "100000000000000000000000" + }, + "93decab0cd745598860f782ac1e8f046cb99e898": { + "balance": "10000000000000000000000" + } + } +} diff --git a/ethcore/res/ethereum/frontier.json b/ethcore/res/ethereum/frontier.json index 2f91809fc..903e87cc7 100644 --- a/ethcore/res/ethereum/frontier.json +++ b/ethcore/res/ethereum/frontier.json @@ -157,6 +157,9 @@ "stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" }, "nodes": [ + "enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@136.243.154.245:30303", + "enode://bcc7240543fe2cf86f5e9093d05753dd83343f8fda7bf0e833f65985c73afccf8f981301e13ef49c4804491eab043647374df1c4adf85766af88a624ecc3330e@136.243.154.244:30303", + "enode://ed4227681ca8c70beb2277b9e870353a9693f12e7c548c35df6bca6a956934d6f659999c2decb31f75ce217822eefca149ace914f1cbe461ed5a2ebaf9501455@88.212.206.70:30303", "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", diff --git a/ethcore/res/ethereum/morden.json b/ethcore/res/ethereum/morden.json index 0d643e4c0..ef18df97d 100644 
--- a/ethcore/res/ethereum/morden.json +++ b/ethcore/res/ethereum/morden.json @@ -8,7 +8,7 @@ "difficultyBoundDivisor": "0x0800", "durationLimit": "0x0d", "blockReward": "0x4563918244F40000", - "registrar": "0x8e4e9b13d4b45cb0befc93c3061b1408f67316b2", + "registrar": "0x52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d", "frontierCompatibilityModeLimit": "0x789b0" } } diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index ac5475d67..8f07dbc82 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit ac5475d676536cb945f98e9ff98384c01abd0599 +Subproject commit 8f07dbc8294a32db5ebe8098925fcefc2eab3e71 diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index a02b670d0..2d00f8ed5 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -121,6 +121,10 @@ impl<'db> HashDB for AccountDB<'db>{ fn remove(&mut self, _key: &H256) { unimplemented!() } + + fn get_aux(&self, hash: &[u8]) -> Option> { + self.db.get_aux(hash) + } } /// DB backend wrapper for Account trie @@ -193,6 +197,18 @@ impl<'db> HashDB for AccountDBMut<'db>{ let key = combine_key(&self.address_hash, key); self.db.remove(&key) } + + fn insert_aux(&mut self, hash: Vec, value: Vec) { + self.db.insert_aux(hash, value); + } + + fn get_aux(&self, hash: &[u8]) -> Option> { + self.db.get_aux(hash) + } + + fn remove_aux(&mut self, hash: &[u8]) { + self.db.remove_aux(hash); + } } struct Wrapping<'db>(&'db HashDB); diff --git a/ethcore/src/action_params.rs b/ethcore/src/action_params.rs index 1886c3d36..46c159269 100644 --- a/ethcore/src/action_params.rs +++ b/ethcore/src/action_params.rs @@ -43,6 +43,8 @@ impl ActionValue { pub struct ActionParams { /// Address of currently executed code. pub code_address: Address, + /// Hash of currently executed code. + pub code_hash: H256, /// Receive address. Usually equal to code_address, /// except when called using CALLCODE. 
pub address: Address, @@ -57,7 +59,7 @@ pub struct ActionParams { /// Transaction value. pub value: ActionValue, /// Code being executed. - pub code: Option, + pub code: Option>, /// Input data. pub data: Option, /// Type of call @@ -70,6 +72,7 @@ impl Default for ActionParams { fn default() -> ActionParams { ActionParams { code_address: Address::new(), + code_hash: SHA3_EMPTY, address: Address::new(), sender: Address::new(), origin: Address::new(), @@ -88,10 +91,11 @@ impl From for ActionParams { let address: Address = t.address.into(); ActionParams { code_address: Address::new(), + code_hash: (&*t.code).sha3(), address: address, sender: t.sender.into(), origin: t.origin.into(), - code: Some(t.code.into()), + code: Some(Arc::new(t.code.into())), data: Some(t.data.into()), gas: t.gas.into(), gas_price: t.gas_price.into(), diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index b35b4dc1a..80c35d1d0 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -16,13 +16,26 @@ //! Blockchain block. -use common::*; +use std::sync::Arc; +use std::collections::HashSet; + +use rlp::{UntrustedRlp, RlpStream, Encodable, Decodable, Decoder, DecoderError, View, Stream}; +use util::{Bytes, Address, Uint, FixedHash, Hashable, U256, H256, ordered_trie_root, SHA3_NULL_RLP}; +use util::error::{Mismatch, OutOfBounds}; + +use basic_types::{LogBloom, Seal}; +use env_info::{EnvInfo, LastHashes}; use engines::Engine; -use state::*; -use verification::PreverifiedBlock; -use trace::FlatTrace; +use error::{Error, BlockError, TransactionError}; use factory::Factories; -use rlp::*; +use header::Header; +use receipt::Receipt; +use state::State; +use state_db::StateDB; +use trace::FlatTrace; +use transaction::SignedTransaction; +use verification::PreverifiedBlock; +use views::BlockView; /// A block, encoded as it is on the block chain. #[derive(Default, Debug, Clone, PartialEq)] @@ -69,7 +82,7 @@ impl Decodable for Block { } } -/// Internal type for a block's common elements. 
+/// An internal type for a block's common elements. #[derive(Clone)] pub struct ExecutedBlock { base: Block, @@ -179,7 +192,7 @@ pub trait IsBlock { /// Trait for a object that has a state database. pub trait Drain { /// Drop this object and return the underlieing database. - fn drain(self) -> Box; + fn drain(self) -> StateDB; } impl IsBlock for ExecutedBlock { @@ -205,6 +218,7 @@ pub struct ClosedBlock { block: ExecutedBlock, uncle_bytes: Bytes, last_hashes: Arc, + unclosed_state: State, } /// Just like `ClosedBlock` except that we can't reopen it and it's faster. @@ -231,7 +245,7 @@ impl<'x> OpenBlock<'x> { engine: &'x Engine, factories: Factories, tracing: bool, - db: Box, + db: StateDB, parent: &Header, last_hashes: Arc, author: Address, @@ -346,8 +360,7 @@ impl<'x> OpenBlock<'x> { pub fn close(self) -> ClosedBlock { let mut s = self; - // take a snapshot so the engine's changes can be rolled back. - s.block.state.snapshot(); + let unclosed_state = s.block.state.clone(); s.engine.on_close_block(&mut s.block); s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()))); @@ -362,6 +375,7 @@ impl<'x> OpenBlock<'x> { block: s.block, uncle_bytes: uncle_bytes, last_hashes: s.last_hashes, + unclosed_state: unclosed_state, } } @@ -369,9 +383,6 @@ impl<'x> OpenBlock<'x> { pub fn close_and_lock(self) -> LockedBlock { let mut s = self; - // take a snapshot so the engine's changes can be rolled back. 
- s.block.state.snapshot(); - s.engine.on_close_block(&mut s.block); if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP { s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()))); @@ -388,11 +399,10 @@ impl<'x> OpenBlock<'x> { s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); - ClosedBlock { + LockedBlock { block: s.block, uncle_bytes: uncle_bytes, - last_hashes: s.last_hashes, - }.lock() + } } } @@ -413,17 +423,7 @@ impl ClosedBlock { pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } /// Turn this into a `LockedBlock`, unable to be reopened again. - pub fn lock(mut self) -> LockedBlock { - // finalize the changes made by the engine. - self.block.state.clear_snapshot(); - if let Err(e) = self.block.state.commit() { - warn!("Error committing closed block's state: {:?}", e); - } - - // set the state root here, after commit recalculates with the block - // rewards. - self.block.base.header.set_state_root(self.block.state.root().clone()); - + pub fn lock(self) -> LockedBlock { LockedBlock { block: self.block, uncle_bytes: self.uncle_bytes, @@ -431,12 +431,12 @@ impl ClosedBlock { } /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen(mut self, engine: &Engine) -> OpenBlock { + pub fn reopen(self, engine: &Engine) -> OpenBlock { // revert rewards (i.e. set state back at last transaction's state). 
- self.block.state.revert_snapshot(); - + let mut block = self.block; + block.state = self.unclosed_state; OpenBlock { - block: self.block, + block: block, engine: engine, last_hashes: self.last_hashes, } @@ -462,11 +462,11 @@ impl LockedBlock { /// Provide a valid seal in order to turn this into a `SealedBlock`. /// This does check the validity of `seal` with the engine. /// Returns the `ClosedBlock` back again if the seal is no good. - pub fn try_seal(self, engine: &Engine, seal: Vec) -> Result { + pub fn try_seal(self, engine: &Engine, seal: Vec) -> Result { let mut s = self; s.block.base.header.set_seal(seal); match engine.verify_block_seal(&s.block.base.header) { - Err(_) => Err(s), + Err(e) => Err((e, s)), _ => Ok(SealedBlock { block: s.block, uncle_bytes: s.uncle_bytes }), } } @@ -474,7 +474,9 @@ impl LockedBlock { impl Drain for LockedBlock { /// Drop this object and return the underlieing database. - fn drain(self) -> Box { self.block.state.drop().1 } + fn drain(self) -> StateDB { + self.block.state.drop().1 + } } impl SealedBlock { @@ -490,7 +492,9 @@ impl SealedBlock { impl Drain for SealedBlock { /// Drop this object and return the underlieing database. 
- fn drain(self) -> Box { self.block.state.drop().1 } + fn drain(self) -> StateDB { + self.block.state.drop().1 + } } impl IsBlock for SealedBlock { @@ -505,7 +509,7 @@ pub fn enact( uncles: &[Header], engine: &Engine, tracing: bool, - db: Box, + db: StateDB, parent: &Header, last_hashes: Arc, factories: Factories, @@ -526,25 +530,38 @@ pub fn enact( b.set_uncles_hash(header.uncles_hash().clone()); b.set_transactions_root(header.transactions_root().clone()); b.set_receipts_root(header.receipts_root().clone()); - for t in transactions { try!(b.push_transaction(t.clone(), None)); } - for u in uncles { try!(b.push_uncle(u.clone())); } + + try!(push_transactions(&mut b, transactions)); + for u in uncles { + try!(b.push_uncle(u.clone())); + } Ok(b.close_and_lock()) } -/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -#[cfg_attr(feature="dev", allow(too_many_arguments))] -pub fn enact_bytes( - block_bytes: &[u8], - engine: &Engine, - tracing: bool, - db: Box, - parent: &Header, - last_hashes: Arc, - factories: Factories, -) -> Result { - let block = BlockView::new(block_bytes); - let header = block.header(); - enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, factories) +#[inline(always)] +#[cfg(not(feature = "slow-blocks"))] +fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction]) -> Result<(), Error> { + for t in transactions { + try!(block.push_transaction(t.clone(), None)); + } + Ok(()) +} + +#[cfg(feature = "slow-blocks")] +fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction]) -> Result<(), Error> { + use std::time; + + let slow_tx = option_env!("SLOW_TX_DURATION").and_then(|v| v.parse().ok()).unwrap_or(100); + for t in transactions { + let hash = t.hash(); + let start = time::Instant::now(); + try!(block.push_transaction(t.clone(), None)); + let took = start.elapsed(); + if took > 
time::Duration::from_millis(slow_tx) { + warn!("Heavy transaction in block {:?}: {:?}", block.header().number(), hash); + } + } + Ok(()) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header @@ -553,7 +570,7 @@ pub fn enact_verified( block: &PreverifiedBlock, engine: &Engine, tracing: bool, - db: Box, + db: StateDB, parent: &Header, last_hashes: Arc, factories: Factories, @@ -562,35 +579,54 @@ pub fn enact_verified( enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, factories) } -/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards -#[cfg_attr(feature="dev", allow(too_many_arguments))] -pub fn enact_and_seal( - block_bytes: &[u8], - engine: &Engine, - tracing: bool, - db: Box, - parent: &Header, - last_hashes: Arc, - factories: Factories, -) -> Result { - let header = BlockView::new(block_bytes).header_view(); - Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)).seal(engine, header.seal()))) -} - #[cfg(test)] mod tests { use tests::helpers::*; use super::*; use common::*; + use engines::Engine; + use factory::Factories; + use state_db::StateDB; + + /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header + #[cfg_attr(feature="dev", allow(too_many_arguments))] + fn enact_bytes( + block_bytes: &[u8], + engine: &Engine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + ) -> Result { + let block = BlockView::new(block_bytes); + let header = block.header(); + enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, factories) + } + + /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block aferwards + #[cfg_attr(feature="dev", allow(too_many_arguments))] + fn enact_and_seal( + block_bytes: &[u8], + engine: &Engine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + ) -> Result { + let header = BlockView::new(block_bytes).header_view(); + Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)).seal(engine, header.seal()))) + } #[test] fn open_block() { use spec::*; let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); @@ -604,25 +640,25 @@ mod tests { let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap() .close_and_lock().seal(engine, vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); 
assert_eq!(e.rlp_bytes(), orig_bytes); let db = e.drain(); - assert_eq!(orig_db.keys(), db.keys()); - assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None); + assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys()); + assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None); } #[test] @@ -632,9 +668,9 @@ mod tests { let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle1_header = Header::new(); @@ -648,9 +684,9 @@ mod tests { let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); let bytes = e.rlp_bytes(); @@ -659,7 +695,7 @@ mod tests { assert_eq!(uncles[1].extra_data(), b"uncle2"); let db = e.drain(); - assert_eq!(orig_db.keys(), db.keys()); - assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None); + assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys()); + assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None); } } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 392581fd1..8daf672b9 100644 
--- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -181,7 +181,7 @@ pub struct BlockChain { pending_best_block: RwLock>, pending_block_hashes: RwLock>, - pending_transaction_addresses: RwLock>, + pending_transaction_addresses: RwLock>>, } impl BlockProvider for BlockChain { @@ -331,11 +331,12 @@ impl BlockProvider for BlockChain { .filter_map(|number| self.block_hash(number).map(|hash| (number, hash))) .filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts))) .filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) - .flat_map(|(number, hash, mut receipts, hashes)| { + .flat_map(|(number, hash, mut receipts, mut hashes)| { assert_eq!(receipts.len(), hashes.len()); log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len()); let receipts_len = receipts.len(); + hashes.reverse(); receipts.reverse(); receipts.into_iter() .map(|receipt| receipt.logs) @@ -680,8 +681,8 @@ impl BlockChain { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: self.prepare_block_details_update(bytes, &info), block_receipts: self.prepare_block_receipts_update(receipts, &info), - transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info), + transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), info: info, block: bytes }, is_best); @@ -714,8 +715,8 @@ impl BlockChain { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: update, block_receipts: self.prepare_block_receipts_update(receipts, &info), - transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info), + transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), info: info, block: bytes, }, is_best); @@ 
-783,8 +784,8 @@ impl BlockChain { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: self.prepare_block_details_update(bytes, &info), block_receipts: self.prepare_block_receipts_update(receipts, &info), - transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info), + transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), info: info.clone(), block: bytes, }, true); @@ -877,7 +878,7 @@ impl BlockChain { let mut write_txs = self.pending_transaction_addresses.write(); batch.extend_with_cache(db::COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite); - batch.extend_with_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite); + batch.extend_with_option_cache(db::COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite); } } @@ -895,18 +896,25 @@ impl BlockChain { *best_block = block; } + let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new()); + let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::, _>(|&(_, ref value)| value.is_none()); + let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect(); - let pending_txs_keys: Vec<_> = pending_write_txs.keys().cloned().collect(); + let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect(); write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new())); - write_txs.extend(mem::replace(&mut *pending_write_txs, HashMap::new())); + write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed")))); + + for hash in retracted_txs.keys() { + write_txs.remove(hash); + } let mut cache_man = self.cache_man.lock(); for n in pending_hashes_keys { cache_man.note_used(CacheID::BlockHashes(n)); } - for hash in pending_txs_keys { + for hash in enacted_txs_keys { 
cache_man.note_used(CacheID::TransactionAddresses(hash)); } } @@ -1008,7 +1016,7 @@ impl BlockChain { } /// This function returns modified transaction addresses. - fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap { + fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap> { let block = BlockView::new(block_bytes); let transaction_hashes = block.transaction_hashes(); @@ -1017,10 +1025,10 @@ impl BlockChain { transaction_hashes.into_iter() .enumerate() .map(|(i ,tx_hash)| { - (tx_hash, TransactionAddress { + (tx_hash, Some(TransactionAddress { block_hash: info.hash.clone(), index: i - }) + })) }) .collect() }, @@ -1031,23 +1039,30 @@ impl BlockChain { let hashes = BodyView::new(&bytes).transaction_hashes(); hashes.into_iter() .enumerate() - .map(|(i, tx_hash)| (tx_hash, TransactionAddress { + .map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress { block_hash: hash.clone(), index: i, - })) - .collect::>() + }))) + .collect::>>() }); let current_addresses = transaction_hashes.into_iter() .enumerate() .map(|(i ,tx_hash)| { - (tx_hash, TransactionAddress { + (tx_hash, Some(TransactionAddress { block_hash: info.hash.clone(), index: i - }) + })) }); - addresses.chain(current_addresses).collect() + let retracted = data.retracted.iter().flat_map(|hash| { + let bytes = self.block_body(hash).expect("Retracted block must be in database."); + let hashes = BodyView::new(&bytes).transaction_hashes(); + hashes.into_iter().map(|hash| (hash, None)).collect::>>() + }); + + // The order here is important! Don't remove transaction if it was part of enacted blocks as well. + retracted.chain(addresses).chain(current_addresses).collect() }, BlockLocation::Branch => HashMap::new(), } @@ -1345,6 +1360,70 @@ mod tests { // TODO: insert block that already includes one of them as an uncle to check it's not allowed. 
} + #[test] + fn test_fork_transaction_addresses() { + let mut canon_chain = ChainGenerator::default(); + let mut finalizer = BlockFinalizer::default(); + let genesis = canon_chain.generate(&mut finalizer).unwrap(); + let mut fork_chain = canon_chain.fork(1); + let mut fork_finalizer = finalizer.fork(); + + let t1 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), + }.sign(&"".sha3()); + + + let b1a = canon_chain + .with_transaction(t1.clone()) + .generate(&mut finalizer).unwrap(); + + // Empty block + let b1b = fork_chain + .generate(&mut fork_finalizer).unwrap(); + + let b2 = fork_chain + .generate(&mut fork_finalizer).unwrap(); + + let b1a_hash = BlockView::new(&b1a).header_view().sha3(); + let b2_hash = BlockView::new(&b2).header_view().sha3(); + + let t1_hash = t1.hash(); + + let temp = RandomTempPath::new(); + let db = new_db(temp.as_str()); + let bc = BlockChain::new(Config::default(), &genesis, db.clone()); + + let mut batch = db.transaction(); + let _ = bc.insert_block(&mut batch, &b1a, vec![]); + bc.commit(); + let _ = bc.insert_block(&mut batch, &b1b, vec![]); + bc.commit(); + db.write(batch).unwrap(); + + assert_eq!(bc.best_block_hash(), b1a_hash); + assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { + block_hash: b1a_hash.clone(), + index: 0, + })); + + // now let's make forked chain the canon chain + let mut batch = db.transaction(); + let _ = bc.insert_block(&mut batch, &b2, vec![]); + bc.commit(); + db.write(batch).unwrap(); + + // Transaction should be retracted + assert_eq!(bc.best_block_hash(), b2_hash); + assert_eq!(bc.transaction_address(&t1_hash), None); + } + + + #[test] fn test_overwriting_transaction_addresses() { let mut canon_chain = ChainGenerator::default(); @@ -1415,14 +1494,14 @@ mod tests { db.write(batch).unwrap(); assert_eq!(bc.best_block_hash(), 
b1a_hash); - assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress { + assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { block_hash: b1a_hash.clone(), index: 0, - }); - assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress { + })); + assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress { block_hash: b1a_hash.clone(), index: 1, - }); + })); // now let's make forked chain the canon chain let mut batch = db.transaction(); @@ -1431,18 +1510,18 @@ mod tests { db.write(batch).unwrap(); assert_eq!(bc.best_block_hash(), b2_hash); - assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress { + assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress { block_hash: b1b_hash.clone(), index: 1, - }); - assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress { + })); + assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress { block_hash: b1b_hash.clone(), index: 0, - }); - assert_eq!(bc.transaction_address(&t3_hash).unwrap(), TransactionAddress { + })); + assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress { block_hash: b2_hash.clone(), index: 0, - }); + })); } #[test] @@ -1682,7 +1761,7 @@ mod tests { gas_price: 0.into(), gas: 100_000.into(), action: Action::Create, - value: 100.into(), + value: 101.into(), data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), }.sign(&"".sha3()); let t2 = Transaction { @@ -1690,7 +1769,7 @@ mod tests { gas_price: 0.into(), gas: 100_000.into(), action: Action::Create, - value: 100.into(), + value: 102.into(), data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), }.sign(&"".sha3()); let t3 = Transaction { @@ -1698,7 +1777,7 @@ mod tests { gas_price: 0.into(), gas: 100_000.into(), action: Action::Create, - value: 100.into(), + value: 103.into(), data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), 
}.sign(&"".sha3()); let tx_hash1 = t1.hash(); diff --git a/ethcore/src/blockchain/update.rs b/ethcore/src/blockchain/update.rs index 24d0644e8..0d7d1dbed 100644 --- a/ethcore/src/blockchain/update.rs +++ b/ethcore/src/blockchain/update.rs @@ -17,8 +17,8 @@ pub struct ExtrasUpdate<'a> { pub block_details: HashMap, /// Modified block receipts. pub block_receipts: HashMap, - /// Modified transaction addresses. - pub transactions_addresses: HashMap, /// Modified blocks blooms. pub blocks_blooms: HashMap, + /// Modified transaction addresses (None signifies removed transactions). + pub transactions_addresses: HashMap>, } diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 0c34382a0..e0282d460 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -18,7 +18,7 @@ use ipc::IpcConfig; use util::H256; /// Represents what has to be handled by actor listening to chain events -#[derive(Ipc)] +#[ipc] pub trait ChainNotify : Send + Sync { /// fires when chain has new blocks. 
fn new_blocks(&self, diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index fcf79cf71..edc97ac9c 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -23,15 +23,14 @@ use time::precise_time_ns; // util use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock}; -use util::journaldb::{self, JournalDB}; -use util::{U256, H256, Address, H2048, Uint}; -use util::sha3::*; -use util::TrieFactory; +use util::{journaldb, TrieFactory, Trie}; +use util::trie::TrieSpec; +use util::{U256, H256, Address, H2048, Uint, FixedHash}; use util::kvdb::*; // other use io::*; -use views::{BlockView, HeaderView, BodyView}; +use views::{HeaderView, BodyView}; use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; use header::BlockNumber; use state::State; @@ -47,12 +46,12 @@ use transaction::{LocalizedTransaction, SignedTransaction, Action}; use blockchain::extras::TransactionAddress; use types::filter::Filter; use log_entry::LocalizedLogEntry; -use block_queue::{BlockQueue, BlockQueueInfo}; +use verification::queue::{BlockQueue, QueueInfo as BlockQueueInfo}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use client::{ BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, - ChainNotify + ChainNotify, }; use client::Error as ClientError; use env_info::EnvInfo; @@ -66,7 +65,7 @@ use miner::{Miner, MinerService}; use snapshot::{self, io as snapshot_io}; use factory::Factories; use rlp::{View, UntrustedRlp}; - +use state_db::StateDB; // re-export pub use types::blockchain_info::BlockChainInfo; @@ -126,9 +125,9 @@ pub struct Client { tracedb: RwLock>, engine: Arc, config: ClientConfig, - db: RwLock>, pruning: journaldb::Algorithm, - state_db: RwLock>, + db: RwLock>, + state_db: Mutex, block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, @@ -148,13 +147,6 @@ pub struct 
Client { /// assume finality of a given candidate. pub const HISTORY: u64 = 1200; -/// Append a path element to the given path and return the string. -pub fn append_path

(path: P, item: &str) -> String where P: AsRef { - let mut p = path.as_ref().to_path_buf(); - p.push(item); - p.to_str().unwrap().to_owned() -} - impl Client { /// Create a new client with given spec and DB path and custom verifier. pub fn new( @@ -168,18 +160,24 @@ impl Client { let path = path.to_path_buf(); let gb = spec.genesis_block(); - let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database))); + let db = Arc::new(try!(Database::open(&db_config, &path.to_str().expect("DB path could not be converted to string.")).map_err(ClientError::Database))); let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())); - let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); - if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) { + let trie_spec = match config.fat_db { + true => TrieSpec::Fat, + false => TrieSpec::Secure, + }; + + let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); + let mut state_db = StateDB::new(journal_db); + if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) { let mut batch = DBTransaction::new(&db); try!(state_db.commit(&mut batch, 0, &spec.genesis_header().hash(), None)); try!(db.write(batch).map_err(ClientError::Database)); } - if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) { + if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(h.state_root())) { warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex()); } @@ -193,7 +191,7 @@ impl Client { let factories = Factories { vm: EvmFactory::new(config.vm_type.clone()), - trie: TrieFactory::new(config.trie_spec.clone()), + trie: TrieFactory::new(trie_spec), accountdb: Default::default(), }; @@ 
-208,7 +206,7 @@ impl Client { verifier: verification::new(config.verifier_type.clone()), config: config, db: RwLock::new(db), - state_db: RwLock::new(state_db), + state_db: Mutex::new(state_db), block_queue: block_queue, report: RwLock::new(Default::default()), import_lock: Mutex::new(()), @@ -291,30 +289,27 @@ impl Client { // Check if Parent is in chain let chain_has_parent = chain.block_header(header.parent_hash()); - if let None = chain_has_parent { + if let Some(parent) = chain_has_parent { + // Enact Verified Block + let last_hashes = self.build_last_hashes(header.parent_hash().clone()); + let db = self.state_db.lock().boxed_clone_canon(&header.parent_hash()); + + let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); + let locked_block = try!(enact_result.map_err(|e| { + warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + })); + + // Final Verification + if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { + warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + return Err(()); + } + + Ok(locked_block) + } else { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash()); - return Err(()); - }; - - // Enact Verified Block - let parent = chain_has_parent.unwrap(); - let last_hashes = self.build_last_hashes(header.parent_hash().clone()); - let db = self.state_db.read().boxed_clone(); - - let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); - if let Err(e) = enact_result { - warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); - }; - - // Final Verification - let locked_block = 
enact_result.unwrap(); - if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { - warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - return Err(()); + Err(()) } - - Ok(locked_block) } fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec, Vec) { @@ -358,23 +353,21 @@ impl Client { for block in blocks { let header = &block.header; - if invalid_blocks.contains(header.parent_hash()) { + let is_invalid = invalid_blocks.contains(header.parent_hash()); + if is_invalid { invalid_blocks.insert(header.hash()); continue; } - let closed_block = self.check_and_close_block(&block); - if let Err(_) = closed_block { + if let Ok(closed_block) = self.check_and_close_block(&block) { + imported_blocks.push(header.hash()); + + let route = self.commit_block(closed_block, &header.hash(), &block.bytes); + import_results.push(route); + + self.report.write().accrue_block(&block); + } else { invalid_blocks.insert(header.hash()); - continue; } - - let closed_block = closed_block.unwrap(); - imported_blocks.push(header.hash()); - - let route = self.commit_block(closed_block, &header.hash(), &block.bytes); - import_results.push(route); - - self.report.write().accrue_block(&block); } let imported = imported_blocks.len(); @@ -424,7 +417,7 @@ impl Client { // Are we committing an era? let ancient = if number >= HISTORY { let n = number - HISTORY; - Some((n, chain.block_hash(n).unwrap())) + Some((n, chain.block_hash(n).expect("only verified blocks can be commited; verified block has hash; qed"))) } else { None }; @@ -442,7 +435,8 @@ impl Client { // CHECK! I *think* this is fine, even if the state_root is equal to another // already-imported block of the same number. // TODO: Prove it with a test. 
- block.drain().commit(&mut batch, number, hash, ancient).expect("DB commit failed."); + let mut state = block.drain(); + state.commit(&mut batch, number, hash, ancient).expect("DB commit failed."); let route = chain.insert_block(&mut batch, block_data, receipts); self.tracedb.read().import(&mut batch, TraceImportRequest { @@ -452,10 +446,11 @@ impl Client { enacted: route.enacted.clone(), retracted: route.retracted.len() }); + let is_canon = route.enacted.last().map_or(false, |h| h == hash); + state.sync_cache(&route.enacted, &route.retracted, is_canon); // Final commit to the DB self.db.read().write_buffered(batch); chain.commit(); - self.update_last_hashes(&parent, hash); route } @@ -503,7 +498,7 @@ impl Client { }; self.block_header(id).and_then(|header| { - let db = self.state_db.read().boxed_clone(); + let db = self.state_db.lock().boxed_clone(); // early exit for pruned blocks if db.is_pruned() && self.chain.read().best_block_number() >= block_number + HISTORY { @@ -533,9 +528,11 @@ impl Client { /// Get a copy of the best block's state. pub fn state(&self) -> State { + let header = self.best_block_header(); + let header = HeaderView::new(&header); State::from_existing( - self.state_db.read().boxed_clone(), - HeaderView::new(&self.best_block_header()).state_root(), + self.state_db.lock().boxed_clone_canon(&header.hash()), + header.state_root(), self.engine.account_start_nonce(), self.factories.clone()) .expect("State root of best block header always valid.") @@ -549,7 +546,7 @@ impl Client { /// Get the report. pub fn report(&self) -> ClientReport { let mut report = self.report.read().clone(); - report.state_db_mem = self.state_db.read().mem_used(); + report.state_db_mem = self.state_db.lock().mem_used(); report } @@ -605,7 +602,7 @@ impl Client { /// Take a snapshot at the given block. /// If the ID given is "latest", this will default to 1000 blocks behind. 
pub fn take_snapshot(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), EthcoreError> { - let db = self.state_db.read().boxed_clone(); + let db = self.state_db.lock().journal_db().boxed_clone(); let best_block_number = self.chain_info().best_block_number; let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))); @@ -684,14 +681,14 @@ impl snapshot::DatabaseRestore for Client { trace!(target: "snapshot", "Replacing client database with {:?}", new_db); let _import_lock = self.import_lock.lock(); - let mut state_db = self.state_db.write(); + let mut state_db = self.state_db.lock(); let mut chain = self.chain.write(); let mut tracedb = self.tracedb.write(); self.miner.clear(); let db = self.db.write(); try!(db.restore(new_db)); - *state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE); + *state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE)); *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()); Ok(()) @@ -811,7 +808,7 @@ impl BlockChainClient for Client { let chain = self.chain.read(); match Self::block_hash(&chain, id) { Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, - Some(hash) => self.block_queue.block_status(&hash), + Some(hash) => self.block_queue.status(&hash).into(), None => BlockStatus::Unknown } } @@ -836,7 +833,7 @@ impl BlockChainClient for Client { } fn code(&self, address: &Address, id: BlockID) -> Option> { - self.state_at(id).map(|s| s.code(address)) + self.state_at(id).map(|s| s.code(address).map(|c| (*c).clone())) } fn balance(&self, address: &Address, id: BlockID) -> Option { @@ -847,6 +844,38 @@ impl BlockChainClient for Client { self.state_at(id).map(|s| s.storage_at(address, position)) } + fn list_accounts(&self, id: BlockID) -> Option> { + if !self.factories.trie.is_fat() { + trace!(target: "fatdb", "list_accounts: Not a 
fat DB"); + return None; + } + + let state = match self.state_at(id) { + Some(state) => state, + _ => return None, + }; + + let (root, db) = state.drop(); + let trie = match self.factories.trie.readonly(db.as_hashdb(), &root) { + Ok(trie) => trie, + _ => { + trace!(target: "fatdb", "list_accounts: Couldn't open the DB"); + return None; + } + }; + + let iter = match trie.iter() { + Ok(iter) => iter, + _ => return None, + }; + + let accounts = iter.filter_map(|item| { + item.ok().map(|(addr, _)| Address::from_slice(&addr)) + }).collect(); + + Some(accounts) + } + fn transaction(&self, id: TransactionID) -> Option { self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address)) } @@ -858,12 +887,17 @@ impl BlockChainClient for Client { fn transaction_receipt(&self, id: TransactionID) -> Option { let chain = self.chain.read(); - self.transaction_address(id).and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| { + self.transaction_address(id) + .and_then(|address| chain.block_number(&address.block_hash).and_then(|block_number| { let t = chain.block_body(&address.block_hash) - .and_then(|block| BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index)); + .and_then(|block| { + BodyView::new(&block).localized_transaction_at(&address.block_hash, block_number, address.index) + }); - match (t, chain.transaction_receipt(&address)) { - (Some(tx), Some(receipt)) => { + let tx_and_sender = t.and_then(|tx| tx.sender().ok().map(|sender| (tx, sender))); + + match (tx_and_sender, chain.transaction_receipt(&address)) { + (Some((tx, sender)), Some(receipt)) => { let block_hash = tx.block_hash.clone(); let block_number = tx.block_number.clone(); let transaction_hash = tx.hash(); @@ -885,7 +919,7 @@ impl BlockChainClient for Client { gas_used: receipt.gas_used - prior_gas_used, contract_address: match tx.action { Action::Call(_) => None, - Action::Create => 
Some(contract_address(&tx.sender().unwrap(), &tx.nonce)) + Action::Create => Some(contract_address(&sender, &tx.nonce)) }, logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { entry: log, @@ -915,7 +949,7 @@ impl BlockChainClient for Client { } fn state_data(&self, hash: &H256) -> Option { - self.state_db.read().state(hash) + self.state_db.lock().journal_db().state(hash) } fn block_receipts(&self, hash: &H256) -> Option { @@ -923,16 +957,21 @@ impl BlockChainClient for Client { } fn import_block(&self, bytes: Bytes) -> Result { + use verification::queue::kind::HasHash; + use verification::queue::kind::blocks::Unverified; + + // create unverified block here so the `sha3` calculation can be cached. + let unverified = Unverified::new(bytes); + { - let header = BlockView::new(&bytes).header_view(); - if self.chain.read().is_known(&header.sha3()) { + if self.chain.read().is_known(&unverified.hash()) { return Err(BlockImportError::Import(ImportError::AlreadyInChain)); } - if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown { - return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash()))); + if self.block_status(BlockID::Hash(unverified.parent_hash())) == BlockStatus::Unknown { + return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash()))); } } - Ok(try!(self.block_queue.import_block(bytes))) + Ok(try!(self.block_queue.import(unverified))) } fn queue_info(&self) -> BlockQueueInfo { @@ -981,17 +1020,18 @@ impl BlockChainClient for Client { let start = self.block_number(filter.range.start); let end = self.block_number(filter.range.end); - if start.is_some() && end.is_some() { - let filter = trace::Filter { - range: start.unwrap() as usize..end.unwrap() as usize, - from_address: From::from(filter.from_address), - to_address: From::from(filter.to_address), - }; + match (start, end) { + (Some(s), Some(e)) => { + let filter = trace::Filter { + range: s as usize..e as usize, 
+ from_address: From::from(filter.from_address), + to_address: From::from(filter.to_address), + }; - let traces = self.tracedb.read().filter(&filter); - Some(traces) - } else { - None + let traces = self.tracedb.read().filter(&filter); + Some(traces) + }, + _ => None, } } @@ -1036,7 +1076,7 @@ impl BlockChainClient for Client { } fn pending_transactions(&self) -> Vec { - self.miner.pending_transactions() + self.miner.pending_transactions(self.chain.read().best_block_number()) } } @@ -1050,7 +1090,7 @@ impl MiningBlockChainClient for Client { engine, self.factories.clone(), false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. - self.state_db.read().boxed_clone(), + self.state_db.lock().boxed_clone_canon(&h), &chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"), self.build_last_hashes(h.clone()), author, @@ -1061,11 +1101,15 @@ impl MiningBlockChainClient for Client { // Add uncles chain .find_uncle_headers(&h, engine.maximum_uncle_age()) - .unwrap() + .unwrap_or_else(Vec::new) .into_iter() .take(engine.maximum_uncle_count()) .foreach(|h| { - open_block.push_uncle(h).unwrap(); + open_block.push_uncle(h).expect("pushing maximum_uncle_count; + open_block was just created; + push_uncle is not ok only if more than maximum_uncle_count is pushed; + so all push_uncle are Ok; + qed"); }); open_block @@ -1086,6 +1130,7 @@ impl MiningBlockChainClient for Client { let block_data = block.rlp_bytes(); let route = self.commit_block(block, &h, &block_data); trace!(target: "client", "Imported sealed block #{} ({})", number, h); + self.state_db.lock().sync_cache(&route.enacted, &route.retracted, false); let (enacted, retracted) = self.calculate_enacted_retracted(&[route]); self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted); diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 0146293df..8cf54387b 100644 --- a/ethcore/src/client/config.rs +++ 
b/ethcore/src/client/config.rs @@ -16,13 +16,12 @@ use std::str::FromStr; pub use std::time::Duration; -pub use block_queue::BlockQueueConfig; pub use blockchain::Config as BlockChainConfig; pub use trace::Config as TraceConfig; pub use evm::VMType; -pub use verification::VerifierType; + +use verification::{VerifierType, QueueConfig}; use util::{journaldb, CompactionProfile}; -use util::trie::TrieSpec; /// Client state db compaction profile #[derive(Debug, PartialEq)] @@ -84,15 +83,15 @@ impl Default for Mode { #[derive(Debug, PartialEq, Default)] pub struct ClientConfig { /// Block queue configuration. - pub queue: BlockQueueConfig, + pub queue: QueueConfig, /// Blockchain configuration. pub blockchain: BlockChainConfig, /// Trace configuration. pub tracing: TraceConfig, /// VM type. pub vm_type: VMType, - /// Trie type. - pub trie_spec: TrieSpec, + /// Fat DB enabled? + pub fat_db: bool, /// The JournalDB ("pruning") algorithm to use. pub pruning: journaldb::Algorithm, /// The name of the client instance. 
diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index a5ff89c47..1e8aa9d72 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -23,20 +23,27 @@ mod trace; mod client; pub use self::client::*; -pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType}; +pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockChainConfig, VMType}; pub use self::error::Error; pub use types::ids::*; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; pub use types::trace_filter::Filter as TraceFilter; pub use executive::{Executed, Executive, TransactOptions}; pub use env_info::{LastHashes, EnvInfo}; -pub use self::chain_notify::{ChainNotify, ChainNotifyClient}; +pub use self::chain_notify::ChainNotify; pub use types::call_analytics::CallAnalytics; pub use block_import_error::BlockImportError; pub use transaction_import::TransactionImportResult; pub use transaction_import::TransactionImportError; -pub use self::traits::{BlockChainClient, MiningBlockChainClient, RemoteClient}; +pub use self::traits::{BlockChainClient, MiningBlockChainClient}; + +/// IPC interfaces +#[cfg(feature="ipc")] +pub mod remote { + pub use super::traits::RemoteClient; + pub use super::chain_notify::ChainNotifyClient; +} mod traits { #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index c18784dbb..dfd7129f6 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -25,8 +25,9 @@ use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; use client::{ BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID, - TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError + TransactionID, UncleID, TraceId, TraceFilter, LastHashes, 
CallAnalytics, BlockImportError, }; +use db::{NUM_COLUMNS, COL_STATE}; use header::{Header as BlockHeader, BlockNumber}; use filter::Filter; use log_entry::LocalizedLogEntry; @@ -37,11 +38,12 @@ use evm::{Factory as EvmFactory, VMType}; use miner::{Miner, MinerService, TransactionImportResult}; use spec::Spec; -use block_queue::BlockQueueInfo; +use verification::queue::QueueInfo; use block::{OpenBlock, SealedBlock}; use executive::Executed; use error::CallError; use trace::LocalizedTrace; +use state_db::StateDB; /// Test client. pub struct TestBlockChainClient { @@ -53,6 +55,8 @@ pub struct TestBlockChainClient { pub genesis_hash: H256, /// Last block hash. pub last_hash: RwLock, + /// Extra data do set for each block + pub extra_data: Bytes, /// Difficulty. pub difficulty: RwLock, /// Balances. @@ -103,16 +107,27 @@ impl Default for TestBlockChainClient { impl TestBlockChainClient { /// Creates new test client. pub fn new() -> Self { + Self::new_with_extra_data(Bytes::new()) + } + + /// Creates new test client with specified extra data for each block + pub fn new_with_extra_data(extra_data: Bytes) -> Self { let spec = Spec::new_test(); - TestBlockChainClient::new_with_spec(spec) + TestBlockChainClient::new_with_spec_and_extra(spec, extra_data) } /// Create test client with custom spec. pub fn new_with_spec(spec: Spec) -> Self { + TestBlockChainClient::new_with_spec_and_extra(spec, Bytes::new()) + } + + /// Create test client with custom spec and extra data. 
+ pub fn new_with_spec_and_extra(spec: Spec, extra_data: Bytes) -> Self { let mut client = TestBlockChainClient { blocks: RwLock::new(HashMap::new()), numbers: RwLock::new(HashMap::new()), genesis_hash: H256::new(), + extra_data: extra_data, last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), balances: RwLock::new(HashMap::new()), @@ -132,7 +147,7 @@ impl TestBlockChainClient { client.genesis_hash = client.last_hash.read().clone(); client } - + /// Set the transaction receipt result pub fn set_transaction_receipt(&self, id: TransactionID, receipt: LocalizedReceipt) { self.receipts.write().insert(id, receipt); @@ -187,6 +202,7 @@ impl TestBlockChainClient { header.set_parent_hash(self.last_hash.read().clone()); header.set_number(n as BlockNumber); header.set_gas_limit(U256::from(1_000_000)); + header.set_extra_data(self.extra_data.clone()); let uncles = match with { EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { let mut uncles = RlpStream::new_list(1); @@ -288,13 +304,14 @@ impl TestBlockChainClient { } } -pub fn get_temp_journal_db() -> GuardedTempResult> { +pub fn get_temp_state_db() -> GuardedTempResult { let temp = RandomTempPath::new(); - let db = Database::open_default(temp.as_str()).unwrap(); - let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None); + let db = Database::open(&DatabaseConfig::with_columns(NUM_COLUMNS), temp.as_str()).unwrap(); + let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE); + let state_db = StateDB::new(journal_db); GuardedTempResult { _temp: temp, - result: Some(journal_db) + result: Some(state_db) } } @@ -302,9 +319,9 @@ impl MiningBlockChainClient for TestBlockChainClient { fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { let engine = &*self.spec.engine; let genesis_header = self.spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut 
db_result = get_temp_state_db(); let mut db = db_result.take(); - self.spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + self.spec.ensure_db_good(&mut db).unwrap(); let last_hashes = vec![genesis_header.hash()]; let mut open_block = OpenBlock::new( @@ -387,12 +404,16 @@ impl BlockChainClient for TestBlockChainClient { } } + fn list_accounts(&self, _id: BlockID) -> Option> { + None + } + fn transaction(&self, _id: TransactionID) -> Option { - unimplemented!(); + None // Simple default. } fn uncle(&self, _id: UncleID) -> Option { - unimplemented!(); + None // Simple default. } fn transaction_receipt(&self, id: TransactionID) -> Option { @@ -549,8 +570,8 @@ impl BlockChainClient for TestBlockChainClient { Ok(h) } - fn queue_info(&self) -> BlockQueueInfo { - BlockQueueInfo { + fn queue_info(&self) -> QueueInfo { + QueueInfo { verified_queue_size: self.queue_size.load(AtomicOrder::Relaxed), unverified_queue_size: 0, verifying_queue_size: 0, @@ -600,6 +621,6 @@ impl BlockChainClient for TestBlockChainClient { } fn pending_transactions(&self) -> Vec { - self.miner.pending_transactions() + self.miner.pending_transactions(self.chain_info().best_block_number) } } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 45f7322fd..5f7b62ee2 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; use util::{U256, Address, H256, H2048, Bytes, Itertools}; use blockchain::TreeRoute; -use block_queue::BlockQueueInfo; +use verification::queue::QueueInfo as BlockQueueInfo; use block::{OpenBlock, SealedBlock}; use header::{BlockNumber}; use transaction::{LocalizedTransaction, SignedTransaction}; @@ -38,7 +38,6 @@ use ipc::IpcConfig; use types::blockchain_info::BlockChainInfo; use types::block_status::BlockStatus; -#[derive(Ipc)] #[ipc(client_ident="RemoteClient")] /// Blockchain database client. Owns and manages a blockchain and a block queue. 
pub trait BlockChainClient : Sync + Send { @@ -112,6 +111,9 @@ pub trait BlockChainClient : Sync + Send { Therefore storage_at has returned Some; qed") } + /// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`. + fn list_accounts(&self, id: BlockID) -> Option>; + /// Get transaction with given hash. fn transaction(&self, id: TransactionID) -> Option; diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index 61cd41bd6..10672d730 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -34,8 +34,10 @@ pub const COL_BODIES: Option = Some(2); pub const COL_EXTRA: Option = Some(3); /// Column for Traces pub const COL_TRACE: Option = Some(4); +/// Column for Traces +pub const COL_ACCOUNT_BLOOM: Option = Some(5); /// Number of columns in DB -pub const NUM_COLUMNS: Option = Some(5); +pub const NUM_COLUMNS: Option = Some(6); /// Modes for updating caches. #[derive(Clone, Copy)] @@ -86,6 +88,9 @@ pub trait Writable { /// Writes the value into the database. fn write(&mut self, col: Option, key: &Key, value: &T) where T: rlp::Encodable, R: Deref; + /// Deletes key from the databse. + fn delete(&mut self, col: Option, key: &Key) where T: rlp::Encodable, R: Deref; + /// Writes the value into the database and updates the cache. fn write_with_cache(&mut self, col: Option, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where K: Key + Hash + Eq, @@ -122,6 +127,34 @@ pub trait Writable { }, } } + + /// Writes and removes the values into the database and updates the cache. 
+ fn extend_with_option_cache(&mut self, col: Option, cache: &mut Cache>, values: HashMap>, policy: CacheUpdatePolicy) where + K: Key + Hash + Eq, + T: rlp::Encodable, + R: Deref { + match policy { + CacheUpdatePolicy::Overwrite => { + for (key, value) in values.into_iter() { + match value { + Some(ref v) => self.write(col, &key, v), + None => self.delete(col, &key), + } + cache.insert(key, value); + } + }, + CacheUpdatePolicy::Remove => { + for (key, value) in values.into_iter() { + match value { + Some(v) => self.write(col, &key, &v), + None => self.delete(col, &key), + } + cache.remove(&key); + } + }, + } + } + } /// Should be used to read values from database. @@ -173,6 +206,10 @@ impl Writable for DBTransaction { fn write(&mut self, col: Option, key: &Key, value: &T) where T: rlp::Encodable, R: Deref { self.put(col, &key.key(), &rlp::encode(value)); } + + fn delete(&mut self, col: Option, key: &Key) where T: rlp::Encodable, R: Deref { + self.delete(col, &key.key()); + } } impl Readable for Database { diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 824a84ecb..6730c2aac 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -337,9 +337,9 @@ mod tests { let spec = Spec::new_test_round(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 3a69adb4c..c003da304 100644 --- a/ethcore/src/engines/basic_authority.rs 
+++ b/ethcore/src/engines/basic_authority.rs @@ -245,9 +245,9 @@ mod tests { let spec = new_test_authority(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 26d2ed5bf..174a80ea8 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -81,9 +81,9 @@ mod tests { let spec = Spec::new_test_instant(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 734acb758..982698a50 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use ethash::{quick_get_difficulty, EthashManager, H256 as EH256}; +use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager, H256 as EH256}; use common::*; use block::*; use spec::CommonParams; @@ -32,6 +32,8 @@ pub struct EthashParams { pub minimum_difficulty: U256, /// Difficulty bound divisor. pub difficulty_bound_divisor: U256, + /// Difficulty increment divisor. + pub difficulty_increment_divisor: u64, /// Block duration. pub duration_limit: u64, /// Block reward. @@ -46,6 +48,12 @@ pub struct EthashParams { pub dao_hardfork_beneficiary: Address, /// DAO hard-fork DAO accounts list (L) pub dao_hardfork_accounts: Vec

, + /// Transition block for a change of difficulty params (currently just bound_divisor). + pub difficulty_hardfork_transition: u64, + /// Difficulty param after the difficulty transition. + pub difficulty_hardfork_bound_divisor: U256, + /// Block on which there is no additional difficulty from the exponential bomb. + pub bomb_defuse_transition: u64, } impl From for EthashParams { @@ -54,6 +62,7 @@ impl From for EthashParams { gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(), minimum_difficulty: p.minimum_difficulty.into(), difficulty_bound_divisor: p.difficulty_bound_divisor.into(), + difficulty_increment_divisor: p.difficulty_increment_divisor.map_or(10, Into::into), duration_limit: p.duration_limit.into(), block_reward: p.block_reward.into(), registrar: p.registrar.map_or_else(Address::new, Into::into), @@ -61,6 +70,9 @@ impl From for EthashParams { dao_hardfork_transition: p.dao_hardfork_transition.map_or(0x7fffffffffffffff, Into::into), dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into), dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(), + difficulty_hardfork_transition: p.difficulty_hardfork_transition.map_or(0x7fffffffffffffff, Into::into), + difficulty_hardfork_bound_divisor: p.difficulty_hardfork_bound_divisor.map_or(p.difficulty_bound_divisor.into(), Into::into), + bomb_defuse_transition: p.bomb_defuse_transition.map_or(0x7fffffffffffffff, Into::into), } } } @@ -168,6 +180,10 @@ impl Engine for Ethash { fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8))); } + // Commit state so that we can actually figure out the state root. 
+ if let Err(e) = fields.state.commit() { + warn!("Encountered error on state commit: {}", e); + } } fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { @@ -217,6 +233,7 @@ impl Engine for Ethash { let result = self.pow.compute_light(header.number() as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64()); let mix = Ethash::from_ethash(result.mix_hash); let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(result.value)); + trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, Ethash::from_ethash(slow_get_seedhash(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), Ethash::from_ethash(result.mix_hash), Ethash::from_ethash(result.value)); if mix != header.mix_hash() { return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() }))); } @@ -267,7 +284,11 @@ impl Ethash { } let min_difficulty = self.ethash_params.minimum_difficulty; - let difficulty_bound_divisor = self.ethash_params.difficulty_bound_divisor; + let difficulty_hardfork = header.number() >= self.ethash_params.difficulty_hardfork_transition; + let difficulty_bound_divisor = match difficulty_hardfork { + true => self.ethash_params.difficulty_hardfork_bound_divisor, + false => self.ethash_params.difficulty_bound_divisor, + }; let duration_limit = self.ethash_params.duration_limit; let frontier_limit = self.ethash_params.frontier_compatibility_mode_limit; @@ -281,17 +302,19 @@ impl Ethash { else { trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp()); //block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99) - let diff_inc = (header.timestamp() - parent.timestamp()) / 10; + let diff_inc = (header.timestamp() - parent.timestamp()) / 
self.ethash_params.difficulty_increment_divisor; if diff_inc <= 1 { - parent.difficulty().clone() + parent.difficulty().clone() / From::from(2048) * From::from(1 - diff_inc) + parent.difficulty().clone() + parent.difficulty().clone() / From::from(difficulty_bound_divisor) * From::from(1 - diff_inc) } else { - parent.difficulty().clone() - parent.difficulty().clone() / From::from(2048) * From::from(min(diff_inc - 1, 99)) + parent.difficulty().clone() - parent.difficulty().clone() / From::from(difficulty_bound_divisor) * From::from(min(diff_inc - 1, 99)) } }; target = max(min_difficulty, target); - let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize; - if period > 1 { - target = max(min_difficulty, target + (U256::from(1) << (period - 2))); + if header.number() < self.ethash_params.bomb_defuse_transition { + let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize; + if period > 1 { + target = max(min_difficulty, target + (U256::from(1) << (period - 2))); + } } target } @@ -355,9 +378,9 @@ mod tests { let spec = new_morden(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close(); @@ -369,9 +392,9 @@ mod tests { let spec = new_morden(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let mut b = 
OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle = Header::new(); diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 6d46d5551..6d4502d2d 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -42,6 +42,9 @@ pub fn new_frontier() -> Spec { load(include_bytes!("../../res/ethereum/frontier /// Create a new Frontier mainnet chain spec without the DAO hardfork. pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) } +/// Create a new Frontier mainnet chain spec without the DAO hardfork. +pub fn new_expanse() -> Spec { load(include_bytes!("../../res/ethereum/expanse.json")) } + /// Create a new Frontier chain spec as though it never changes to Homestead. pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) } @@ -69,9 +72,9 @@ mod tests { let spec = new_morden(); let engine = &spec.engine; let genesis_header = spec.genesis_header(); - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + spec.ensure_db_good(&mut db).unwrap(); let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap(); assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into()); assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into()); diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs index 2bbc7035b..6397f067e 100644 --- a/ethcore/src/evm/ext.rs +++ b/ethcore/src/evm/ext.rs @@ -81,7 +81,7 @@ pub trait Ext { ) -> MessageCallResult; /// Returns code at given address - fn extcode(&self, address: &Address) -> Bytes; + fn extcode(&self, address: &Address) -> Arc; /// Returns code size at given address fn 
extcodesize(&self, address: &Address) -> usize; diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 94800c7de..629b423da 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -18,8 +18,10 @@ //! //! TODO: consider spliting it into two separate files. use std::fmt; +use std::sync::Arc; use evm::Evm; use util::{U256, Uint}; +use super::interpreter::SharedCache; #[derive(Debug, PartialEq, Clone)] /// Type of EVM to use. @@ -82,7 +84,8 @@ impl VMType { /// Evm factory. Creates appropriate Evm. #[derive(Clone)] pub struct Factory { - evm: VMType + evm: VMType, + evm_cache: Arc, } impl Factory { @@ -95,9 +98,9 @@ impl Factory { Box::new(super::jit::JitEvm::default()) }, VMType::Interpreter => if Self::can_fit_in_usize(gas) { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } else { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } } } @@ -108,9 +111,9 @@ impl Factory { pub fn create(&self, gas: U256) -> Box { match self.evm { VMType::Interpreter => if Self::can_fit_in_usize(gas) { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } else { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } } } @@ -118,7 +121,8 @@ impl Factory { /// Create new instance of specific `VMType` factory pub fn new(evm: VMType) -> Self { Factory { - evm: evm + evm: evm, + evm_cache: Arc::new(SharedCache::default()), } } @@ -132,7 +136,8 @@ impl Default for Factory { #[cfg(all(feature = "jit", not(test)))] fn default() -> Factory { Factory { - evm: VMType::Jit + evm: VMType::Jit, + evm_cache: Arc::new(SharedCache::default()), } } @@ -140,7 +145,8 @@ impl Default for Factory { #[cfg(any(not(feature = "jit"), test))] fn default() 
-> Factory { Factory { - evm: VMType::Interpreter + evm: VMType::Interpreter, + evm_cache: Arc::new(SharedCache::default()), } } } diff --git a/ethcore/src/evm/interpreter/informant.rs b/ethcore/src/evm/interpreter/informant.rs new file mode 100644 index 000000000..200b01526 --- /dev/null +++ b/ethcore/src/evm/interpreter/informant.rs @@ -0,0 +1,164 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub use self::inner::*; + +#[macro_use] +#[cfg(not(feature = "evm-debug"))] +mod inner { + macro_rules! evm_debug { + ($x: expr) => {} + } + + pub struct EvmInformant; + impl EvmInformant { + pub fn new(_depth: usize) -> Self { + EvmInformant {} + } + pub fn done(&mut self) {} + } +} + +#[macro_use] +#[cfg(feature = "evm-debug")] +mod inner { + use std::iter; + use std::collections::HashMap; + use std::time::{Instant, Duration}; + + use evm::interpreter::stack::Stack; + use evm::instructions::{Instruction, InstructionInfo, INSTRUCTIONS}; + use evm::{CostType}; + + use util::U256; + + macro_rules! 
evm_debug { + ($x: expr) => { + $x + } + } + + fn print(data: String) { + if cfg!(feature = "evm-debug-tests") { + println!("{}", data); + } else { + debug!(target: "evm", "{}", data); + } + } + + pub struct EvmInformant { + spacing: String, + last_instruction: Instant, + stats: HashMap, + } + + impl EvmInformant { + + fn color(instruction: Instruction, name: &str) -> String { + let c = instruction as usize % 6; + let colors = [31, 34, 33, 32, 35, 36]; + format!("\x1B[1;{}m{}\x1B[0m", colors[c], name) + } + + fn as_micro(duration: &Duration) -> u64 { + let mut sec = duration.as_secs(); + let subsec = duration.subsec_nanos() as u64; + sec = sec.saturating_mul(1_000_000u64); + sec += subsec / 1_000; + sec + } + + pub fn new(depth: usize) -> Self { + EvmInformant { + spacing: iter::repeat(".").take(depth).collect(), + last_instruction: Instant::now(), + stats: HashMap::new(), + } + } + + pub fn before_instruction(&mut self, pc: usize, instruction: Instruction, info: &InstructionInfo, current_gas: &Cost, stack: &Stack) { + let time = self.last_instruction.elapsed(); + self.last_instruction = Instant::now(); + + print(format!("{}[0x{:<3x}][{:>19}(0x{:<2x}) Gas Left: {:6?} (Previous took: {:10}μs)", + &self.spacing, + pc, + Self::color(instruction, info.name), + instruction, + current_gas, + Self::as_micro(&time), + )); + + if info.args > 0 { + for (idx, item) in stack.peek_top(info.args).iter().enumerate() { + print(format!("{} |{:2}: {:?}", self.spacing, idx, item)); + } + } + } + + pub fn after_instruction(&mut self, instruction: Instruction) { + let mut stats = self.stats.entry(instruction).or_insert_with(|| Stats::default()); + let took = self.last_instruction.elapsed(); + stats.note(took); + } + + pub fn done(&mut self) { + // Print out stats + let infos = &*INSTRUCTIONS; + + let mut stats: Vec<(_,_)> = self.stats.drain().collect(); + stats.sort_by(|ref a, ref b| b.1.avg().cmp(&a.1.avg())); + + print(format!("\n{}-------OPCODE STATS:", self.spacing)); + for 
(instruction, stats) in stats.into_iter() { + let info = infos[instruction as usize]; + print(format!("{}-------{:>19}(0x{:<2x}) count: {:4}, avg: {:10}μs", + self.spacing, + Self::color(instruction, info.name), + instruction, + stats.count, + stats.avg(), + )); + } + } + + } + + struct Stats { + count: u64, + total_duration: Duration, + } + + impl Default for Stats { + fn default() -> Self { + Stats { + count: 0, + total_duration: Duration::from_secs(0), + } + } + } + + impl Stats { + fn note(&mut self, took: Duration) { + self.count += 1; + self.total_duration += took; + } + + fn avg(&self) -> u64 { + EvmInformant::as_micro(&self.total_duration) / self.count + } + } +} diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index ad2d5cd34..887f37cef 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -16,25 +16,17 @@ //! Rust VM implementation -#[cfg(not(feature = "evm-debug"))] -macro_rules! evm_debug { - ($x: expr) => {} -} - -#[cfg(feature = "evm-debug")] -macro_rules! 
evm_debug { - ($x: expr) => { - $x - } -} - +#[macro_use] +mod informant; mod gasometer; mod stack; mod memory; +mod shared_cache; use self::gasometer::Gasometer; use self::stack::{Stack, VecStack}; use self::memory::Memory; +pub use self::shared_cache::SharedCache; use std::marker::PhantomData; use common::*; @@ -43,13 +35,6 @@ use super::instructions::{self, Instruction, InstructionInfo}; use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType}; use bit_set::BitSet; -#[cfg(feature = "evm-debug")] -fn color(instruction: Instruction, name: &'static str) -> String { - let c = instruction as usize % 6; - let colors = [31, 34, 33, 32, 35, 36]; - format!("\x1B[1;{}m{}\x1B[0m", colors[c], name) -} - type CodePosition = usize; type ProgramCounter = usize; @@ -72,6 +57,15 @@ struct CodeReader<'a> { #[cfg_attr(feature="dev", allow(len_without_is_empty))] impl<'a> CodeReader<'a> { + + /// Create new code reader - starting at position 0. + fn new(code: &'a Bytes) -> Self { + CodeReader { + position: 0, + code: code, + } + } + /// Get `no_of_bytes` from code and convert to U256. 
Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { let pos = self.position; @@ -98,9 +92,9 @@ enum InstructionResult { /// Intepreter EVM implementation -#[derive(Default)] pub struct Interpreter { mem: Vec, + cache: Arc, _type: PhantomData, } @@ -108,26 +102,25 @@ impl evm::Evm for Interpreter { fn exec(&mut self, params: ActionParams, ext: &mut evm::Ext) -> evm::Result { self.mem.clear(); + let mut informant = informant::EvmInformant::new(ext.depth()); + let code = ¶ms.code.as_ref().unwrap(); - let valid_jump_destinations = self.find_jump_destinations(code); + let valid_jump_destinations = self.cache.jump_destinations(¶ms.code_hash, code); let mut gasometer = Gasometer::::new(try!(Cost::from_u256(params.gas))); let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero()); - let mut reader = CodeReader { - position: 0, - code: code - }; + let mut reader = CodeReader::new(code); let infos = &*instructions::INSTRUCTIONS; while reader.position < code.len() { let instruction = code[reader.position]; reader.position += 1; - let info = infos[instruction as usize]; - try!(self.verify_instruction(ext, instruction, &info, &stack)); + let info = &infos[instruction as usize]; + try!(self.verify_instruction(ext, instruction, info, &stack)); // Calculate gas cost - let (gas_cost, mem_gas, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, &info, &stack, self.mem.size())); + let (gas_cost, mem_gas, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, info, &stack, self.mem.size())); // TODO: make compile-time removable if too much of a performance hit. 
let trace_executed = ext.trace_prepare_execute(reader.position - 1, instruction, &gas_cost.as_u256()); @@ -136,15 +129,7 @@ impl evm::Evm for Interpreter { gasometer.current_mem_gas = mem_gas; gasometer.current_gas = gasometer.current_gas - gas_cost; - evm_debug!({ - println!("[0x{:x}][{}(0x{:x}) Gas: {:?}\n Gas Before: {:?}", - reader.position, - color(instruction, info.name), - instruction, - gas_cost, - gasometer.current_gas + gas_cost - ); - }); + evm_debug!({ informant.before_instruction(reader.position, instruction, info, &gasometer.current_gas, &stack) }); let (mem_written, store_written) = match trace_executed { true => (Self::mem_written(instruction, &stack), Self::store_written(instruction, &stack)), @@ -156,6 +141,8 @@ impl evm::Evm for Interpreter { gasometer.current_gas, ¶ms, ext, instruction, &mut reader, &mut stack )); + evm_debug!({ informant.after_instruction(instruction) }); + if trace_executed { ext.trace_executed(gasometer.current_gas.as_u256(), stack.peek_top(info.ret), mem_written.map(|(o, s)| (o, &(self.mem[o..(o + s)]))), store_written); } @@ -177,17 +164,26 @@ impl evm::Evm for Interpreter { reader.position = pos; }, InstructionResult::StopExecutionNeedsReturn(gas, off, size) => { + informant.done(); return Ok(GasLeft::NeedsReturn(gas.as_u256(), self.mem.read_slice(off, size))); }, InstructionResult::StopExecution => break, } } - + informant.done(); Ok(GasLeft::Known(gasometer.current_gas.as_u256())) } } impl Interpreter { + /// Create a new `Interpreter` instance with shared cache. 
+ pub fn new(cache: Arc) -> Interpreter { + Interpreter { + mem: Vec::new(), + cache: cache, + _type: PhantomData::default(), + } + } fn verify_instruction(&self, ext: &evm::Ext, instruction: Instruction, info: &InstructionInfo, stack: &Stack) -> evm::Result<()> { let schedule = ext.schedule(); @@ -486,10 +482,10 @@ impl Interpreter { stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { - self.copy_data_to_memory(stack, ¶ms.data.clone().unwrap_or_else(|| vec![])); + self.copy_data_to_memory(stack, params.data.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); }, instructions::CODECOPY => { - self.copy_data_to_memory(stack, ¶ms.code.clone().unwrap_or_else(|| vec![])); + self.copy_data_to_memory(stack, params.code.as_ref().map_or_else(|| &[] as &[u8], |c| &**c as &[u8])); }, instructions::EXTCODECOPY => { let address = u256_to_address(&stack.pop_back()); @@ -790,23 +786,6 @@ impl Interpreter { Ok(()) } - fn find_jump_destinations(&self, code: &[u8]) -> BitSet { - let mut jump_dests = BitSet::with_capacity(code.len()); - let mut position = 0; - - while position < code.len() { - let instruction = code[position]; - - if instruction == instructions::JUMPDEST { - jump_dests.insert(position); - } else if instructions::is_push(instruction) { - position += instructions::get_push_bytes(instruction); - } - position += 1; - } - - jump_dests - } } fn get_and_reset_sign(value: U256) -> (U256, bool) { @@ -833,15 +812,3 @@ fn address_to_u256(value: Address) -> U256 { U256::from(&*H256::from(value)) } -#[test] -fn test_find_jump_destinations() { - // given - let interpreter = Interpreter::::default(); - let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap(); - - // when - let valid_jump_destinations = interpreter.find_jump_destinations(&code); - - // then - assert!(valid_jump_destinations.contains(66)); -} diff --git 
a/ethcore/src/evm/interpreter/shared_cache.rs b/ethcore/src/evm/interpreter/shared_cache.rs new file mode 100644 index 000000000..ce383bae8 --- /dev/null +++ b/ethcore/src/evm/interpreter/shared_cache.rs @@ -0,0 +1,84 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use lru_cache::LruCache; +use util::{H256, Mutex}; +use util::sha3::*; +use bit_set::BitSet; +use super::super::instructions; + +const CACHE_CODE_ITEMS: usize = 65536; + +/// GLobal cache for EVM interpreter +pub struct SharedCache { + jump_destinations: Mutex>> +} + +impl SharedCache { + /// Get jump destincations bitmap for a contract. 
+ pub fn jump_destinations(&self, code_hash: &H256, code: &[u8]) -> Arc { + if code_hash == &SHA3_EMPTY { + return Self::find_jump_destinations(code); + } + if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) { + return d.clone(); + } + + let d = Self::find_jump_destinations(code); + self.jump_destinations.lock().insert(code_hash.clone(), d.clone()); + d + } + + fn find_jump_destinations(code: &[u8]) -> Arc { + let mut jump_dests = BitSet::with_capacity(code.len()); + let mut position = 0; + + while position < code.len() { + let instruction = code[position]; + + if instruction == instructions::JUMPDEST { + jump_dests.insert(position); + } else if instructions::is_push(instruction) { + position += instructions::get_push_bytes(instruction); + } + position += 1; + } + Arc::new(jump_dests) + } +} + +impl Default for SharedCache { + fn default() -> SharedCache { + SharedCache { + jump_destinations: Mutex::new(LruCache::new(CACHE_CODE_ITEMS)), + } + } +} + + +#[test] +fn test_find_jump_destinations() { + use util::FromHex; + // given + let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap(); + + // when + let valid_jump_destinations = SharedCache::find_jump_destinations(&code); + + // then + assert!(valid_jump_destinations.contains(66)); +} diff --git a/ethcore/src/evm/interpreter/stack.rs b/ethcore/src/evm/interpreter/stack.rs index 98adf8539..0d7ef4dbb 100644 --- a/ethcore/src/evm/interpreter/stack.rs +++ b/ethcore/src/evm/interpreter/stack.rs @@ -34,7 +34,7 @@ pub trait Stack { /// Get number of elements on Stack fn size(&self) -> usize; /// Returns all data on stack. 
- fn peek_top(&mut self, no_of_elems: usize) -> &[T]; + fn peek_top(&self, no_of_elems: usize) -> &[T]; } pub struct VecStack { @@ -68,12 +68,7 @@ impl Stack for VecStack { fn pop_back(&mut self) -> S { let val = self.stack.pop(); match val { - Some(x) => { - evm_debug!({ - println!(" POP: {}", x) - }); - x - }, + Some(x) => x, None => panic!("Tried to pop from empty stack.") } } @@ -88,9 +83,6 @@ impl Stack for VecStack { } fn push(&mut self, elem: S) { - evm_debug!({ - println!(" PUSH: {}", elem) - }); self.stack.push(elem); } @@ -98,7 +90,7 @@ impl Stack for VecStack { self.stack.len() } - fn peek_top(&mut self, no_from_top: usize) -> &[S] { + fn peek_top(&self, no_from_top: usize) -> &[S] { assert!(self.stack.len() >= no_from_top, "peek_top asked for more items than exist."); &self.stack[self.stack.len() - no_from_top .. self.stack.len()] } diff --git a/ethcore/src/evm/jit.rs b/ethcore/src/evm/jit.rs index c62f87ab7..3487d9a59 100644 --- a/ethcore/src/evm/jit.rs +++ b/ethcore/src/evm/jit.rs @@ -196,6 +196,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { receive_address: *const evmjit::H256, code_address: *const evmjit::H256, transfer_value: *const evmjit::I256, + // We are ignoring apparent value - it's handled in externalities. _apparent_value: *const evmjit::I256, in_beg: *const u8, in_size: u64, @@ -208,12 +209,13 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { let sender_address = unsafe { Address::from_jit(&*sender_address) }; let receive_address = unsafe { Address::from_jit(&*receive_address) }; let code_address = unsafe { Address::from_jit(&*code_address) }; - // TODO Is it always safe in case of DELEGATE_CALL? 
let transfer_value = unsafe { U256::from_jit(&*transfer_value) }; - let value = Some(transfer_value); // receive address and code address are the same in normal calls let is_callcode = receive_address != code_address; + let is_delegatecall = is_callcode && sender_address != receive_address; + + let value = if is_delegatecall { None } else { Some(transfer_value) }; if !is_callcode && !self.ext.exists(&code_address) { gas_cost = gas_cost + U256::from(self.ext.schedule().call_new_account_gas); @@ -242,10 +244,10 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { } } - // TODO [ToDr] Any way to detect DelegateCall? - let call_type = match is_callcode { - true => CallType::CallCode, - false => CallType::Call, + let call_type = match (is_callcode, is_delegatecall) { + (_, true) => CallType::DelegateCall, + (true, false) => CallType::CallCode, + (false, false) => CallType::Call, }; match self.ext.call( diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index ec217b6c5..eb7d168cf 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -49,7 +49,7 @@ pub struct FakeExt { depth: usize, store: HashMap, blockhashes: HashMap, - codes: HashMap, + codes: HashMap>, logs: Vec, _suicides: HashSet
, info: EnvInfo, @@ -136,12 +136,12 @@ impl Ext for FakeExt { MessageCallResult::Success(*gas) } - fn extcode(&self, address: &Address) -> Bytes { - self.codes.get(address).unwrap_or(&Bytes::new()).clone() + fn extcode(&self, address: &Address) -> Arc { + self.codes.get(address).unwrap_or(&Arc::new(Bytes::new())).clone() } fn extcodesize(&self, address: &Address) -> usize { - self.codes.get(address).map(|v| v.len()).unwrap_or(0) + self.codes.get(address).map_or(0, |c| c.len()) } fn log(&mut self, topics: Vec, data: &[u8]) { @@ -184,11 +184,11 @@ fn test_stack_underflow() { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let err = { - let mut vm : Box = Box::new(super::interpreter::Interpreter::::default()); + let mut vm : Box = Box::new(super::interpreter::Interpreter::::new(Arc::new(super::interpreter::SharedCache::default()))); test_finalize(vm.exec(params, &mut ext)).unwrap_err() }; @@ -211,7 +211,7 @@ fn test_add(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -231,7 +231,7 @@ fn test_sha3(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -251,7 +251,7 @@ fn test_address(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -273,7 +273,7 @@ fn test_origin(factory: super::Factory) { params.address = 
address.clone(); params.origin = origin.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -295,7 +295,7 @@ fn test_sender(factory: super::Factory) { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -329,9 +329,9 @@ fn test_extcodecopy(factory: super::Factory) { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); - ext.codes.insert(sender, sender_code); + ext.codes.insert(sender, Arc::new(sender_code)); let gas_left = { let mut vm = factory.create(params.gas); @@ -350,7 +350,7 @@ fn test_log_empty(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -382,7 +382,7 @@ fn test_log_sender(factory: super::Factory) { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -406,7 +406,7 @@ fn test_blockhash(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.blockhashes.insert(U256::zero(), blockhash.clone()); @@ -428,7 +428,7 @@ fn test_calldataload(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code 
= Some(Arc::new(code)); params.data = Some(data); let mut ext = FakeExt::new(); @@ -449,7 +449,7 @@ fn test_author(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.author = author; @@ -469,7 +469,7 @@ fn test_timestamp(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.timestamp = timestamp; @@ -489,7 +489,7 @@ fn test_number(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.number = number; @@ -509,7 +509,7 @@ fn test_difficulty(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.difficulty = difficulty; @@ -529,7 +529,7 @@ fn test_gas_limit(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.gas_limit = gas_limit; @@ -548,7 +548,7 @@ fn test_mul(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -566,7 +566,7 @@ fn test_sub(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -584,7 +584,7 @@ fn test_div(factory: super::Factory) { let mut params = 
ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -602,7 +602,7 @@ fn test_div_zero(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -620,7 +620,7 @@ fn test_mod(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -639,7 +639,7 @@ fn test_smod(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -658,7 +658,7 @@ fn test_sdiv(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -677,7 +677,7 @@ fn test_exp(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -697,7 +697,7 @@ fn test_comparison(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -718,7 +718,7 @@ fn test_signed_comparison(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -739,7 +739,7 @@ fn test_bitops(factory: super::Factory) { 
let mut params = ActionParams::default(); params.gas = U256::from(150_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -762,7 +762,7 @@ fn test_addmod_mulmod(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -783,7 +783,7 @@ fn test_byte(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -802,7 +802,7 @@ fn test_signextend(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -822,7 +822,7 @@ fn test_badinstruction_int() { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let err = { @@ -842,7 +842,7 @@ fn test_pop(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -862,7 +862,7 @@ fn test_extops(factory: super::Factory) { params.gas = U256::from(150_000); params.gas_price = U256::from(0x32); params.value = ActionValue::Transfer(U256::from(0x99)); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -885,7 +885,7 @@ fn test_jumps(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(150_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -908,7 
+908,7 @@ fn test_calls(factory: super::Factory) { let code_address = Address::from(0x998); let mut params = ActionParams::default(); params.gas = U256::from(150_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); params.address = address.clone(); let mut ext = FakeExt::new(); ext.balances = { diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 8f8b534ee..f3186d6dd 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -25,10 +25,10 @@ use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, E use crossbeam; pub use types::executed::{Executed, ExecutionResult}; -/// Max depth to avoid stack overflow (when it's reached we start a new thread with VM) +/// Roughly estimate what stack size each level of evm depth will use /// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132) /// Maybe something like here: `https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp` -const MAX_VM_DEPTH_FOR_THREAD: usize = 64; +const STACK_SIZE_PER_DEPTH: usize = 24*1024; /// Returns new address created from address and given nonce. pub fn contract_address(address: &Address, nonce: &U256) -> Address { @@ -149,12 +149,13 @@ impl<'a> Executive<'a> { // TODO: we might need bigints here, or at least check overflows. 
let balance = self.state.balance(&sender); - let gas_cost = U512::from(t.gas) * U512::from(t.gas_price); + let gas_cost = t.gas.full_mul(t.gas_price); let total_cost = U512::from(t.value) + gas_cost; // avoid unaffordable transactions - if U512::from(balance) < total_cost { - return Err(From::from(ExecutionError::NotEnoughCash { required: total_cost, got: U512::from(balance) })); + let balance512 = U512::from(balance); + if balance512 < total_cost { + return Err(From::from(ExecutionError::NotEnoughCash { required: total_cost, got: balance512 })); } // NOTE: there can be no invalid transactions from this point. @@ -168,13 +169,14 @@ impl<'a> Executive<'a> { let new_address = contract_address(&sender, &nonce); let params = ActionParams { code_address: new_address.clone(), + code_hash: t.data.sha3(), address: new_address, sender: sender.clone(), origin: sender.clone(), gas: init_gas, gas_price: t.gas_price, value: ActionValue::Transfer(t.value), - code: Some(t.data.clone()), + code: Some(Arc::new(t.data.clone())), data: None, call_type: CallType::None, }; @@ -190,6 +192,7 @@ impl<'a> Executive<'a> { gas_price: t.gas_price, value: ActionValue::Transfer(t.value), code: self.state.code(address), + code_hash: self.state.code_hash(address), data: Some(t.data.clone()), call_type: CallType::Call, }; @@ -210,8 +213,11 @@ impl<'a> Executive<'a> { tracer: &mut T, vm_tracer: &mut V ) -> evm::Result where T: Tracer, V: VMTracer { + + let depth_threshold = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get() / STACK_SIZE_PER_DEPTH); + // Ordinary execution - keep VM in same thread - if (self.depth + 1) % MAX_VM_DEPTH_FOR_THREAD != 0 { + if (self.depth + 1) % depth_threshold != 0 { let vm_factory = self.vm_factory; let mut ext = self.as_externalities(OriginInfo::from(¶ms), unconfirmed_substate, output_policy, tracer, vm_tracer); trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call); @@ -263,7 +269,7 @@ impl<'a> Executive<'a> { let cost = 
self.engine.cost_of_builtin(¶ms.code_address, data); if cost <= params.gas { self.engine.execute_builtin(¶ms.code_address, data, &mut output); - self.state.clear_snapshot(); + self.state.discard_snapshot(); // trace only top level calls to builtins to avoid DDoS attacks if self.depth == 0 { @@ -283,7 +289,7 @@ impl<'a> Executive<'a> { Ok(params.gas - cost) } else { // just drain the whole gas - self.state.revert_snapshot(); + self.state.revert_to_snapshot(); tracer.trace_failed_call(trace_info, vec![], evm::Error::OutOfGas.into()); @@ -329,7 +335,7 @@ impl<'a> Executive<'a> { res } else { // otherwise it's just a basic transaction, only do tracing, if necessary. - self.state.clear_snapshot(); + self.state.discard_snapshot(); tracer.trace_call(trace_info, U256::zero(), trace_output, vec![]); Ok(params.gas) @@ -411,7 +417,7 @@ impl<'a> Executive<'a> { // real ammount to refund let gas_left_prerefund = match result { Ok(x) => x, _ => 0.into() }; - let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) / U256::from(2)); + let refunded = cmp::min(refunds_bound, (t.gas - gas_left_prerefund) >> 1); let gas_left = gas_left_prerefund + refunded; let gas_used = t.gas - gas_left; @@ -471,10 +477,10 @@ impl<'a> Executive<'a> { | Err(evm::Error::BadInstruction {.. 
}) | Err(evm::Error::StackUnderflow {..}) | Err(evm::Error::OutOfStack {..}) => { - self.state.revert_snapshot(); + self.state.revert_to_snapshot(); }, Ok(_) | Err(evm::Error::Internal) => { - self.state.clear_snapshot(); + self.state.discard_snapshot(); substate.accrue(un_substate); } } @@ -511,7 +517,7 @@ mod tests { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some("3331600055".from_hex().unwrap()); + params.code = Some(Arc::new("3331600055".from_hex().unwrap())); params.value = ActionValue::Transfer(U256::from(0x7)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -570,7 +576,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -589,8 +595,11 @@ mod tests { assert_eq!(substate.contracts_created.len(), 0); } - evm_test!{test_call_to_create: test_call_to_create_jit, test_call_to_create_int} - fn test_call_to_create(factory: Factory) { + #[test] + // Tracing is not suported in JIT + fn test_call_to_create() { + let factory = Factory::new(VMType::Interpreter); + // code: // // 7c 601080600c6000396000f3006000355415600957005b60203560003555 - push 29 bytes? 
@@ -625,7 +634,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); params.call_type = CallType::Call; let mut state_result = get_temp_state(); @@ -712,8 +721,10 @@ mod tests { assert_eq!(vm_tracer.drain().unwrap(), expected_vm_trace); } - evm_test!{test_create_contract: test_create_contract_jit, test_create_contract_int} - fn test_create_contract(factory: Factory) { + #[test] + fn test_create_contract() { + // Tracing is not supported in JIT + let factory = Factory::new(VMType::Interpreter); // code: // // 60 10 - push 16 @@ -735,7 +746,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(100.into()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -823,7 +834,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -875,7 +886,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -932,7 +943,7 @@ mod tests { params.address = address_a.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code_a.clone()); + params.code = Some(Arc::new(code_a.clone())); params.value = 
ActionValue::Transfer(U256::from(100_000)); let mut state_result = get_temp_state(); @@ -982,10 +993,10 @@ mod tests { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code.clone())); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.init_code(&address, code.clone()); + state.init_code(&address, code); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -1183,7 +1194,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(0x0186a0); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 7395522c3..67c04aefb 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -146,7 +146,8 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT gas: *gas, gas_price: self.origin_info.gas_price, value: ActionValue::Transfer(*value), - code: Some(code.to_vec()), + code: Some(Arc::new(code.to_vec())), + code_hash: code.sha3(), data: None, call_type: CallType::None, }; @@ -185,6 +186,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT gas: *gas, gas_price: self.origin_info.gas_price, code: self.state.code(code_address), + code_hash: self.state.code_hash(code_address), data: Some(data.to_vec()), call_type: call_type, }; @@ -201,15 +203,14 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT } } - fn extcode(&self, address: &Address) -> Bytes { - self.state.code(address).unwrap_or_else(|| vec![]) + fn extcode(&self, address: &Address) -> Arc 
{ + self.state.code(address).unwrap_or_else(|| Arc::new(vec![])) } fn extcodesize(&self, address: &Address) -> usize { self.state.code_size(address).unwrap_or(0) } - #[cfg_attr(feature="dev", allow(match_ref_pats))] fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result where Self: Sized { diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index 04581fac9..7d86cfd61 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -295,6 +295,12 @@ impl Encodable for Header { } } +impl HeapSizeOf for Header { + fn heap_size_of_children(&self) -> usize { + self.extra_data.heap_size_of_children() + self.seal.heap_size_of_children() + } +} + #[cfg(test)] mod tests { use rustc_serialize::hex::FromHex; diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 1fe98acdb..5576f9ad4 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -127,7 +127,7 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer { MessageCallResult::Success(*gas) } - fn extcode(&self, address: &Address) -> Bytes { + fn extcode(&self, address: &Address) -> Arc { self.ext.extcode(address) } @@ -232,7 +232,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { for (address, account) in vm.post_state.unwrap().into_iter() { let address = address.into(); let code: Vec = account.code.into(); - fail_unless(state.code(&address).unwrap_or_else(Vec::new) == code, "code is incorrect"); + fail_unless(state.code(&address).as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); fail_unless(state.balance(&address) == account.balance.into(), "balance is incorrect"); fail_unless(state.nonce(&address) == account.nonce.into(), "nonce is incorrect"); account.storage.into_iter().foreach(|(k, v)| { diff --git a/ethcore/src/json_tests/homestead_chain.rs b/ethcore/src/json_tests/homestead_chain.rs index 8db8ad224..314c36cf1 100644 --- a/ethcore/src/json_tests/homestead_chain.rs 
+++ b/ethcore/src/json_tests/homestead_chain.rs @@ -36,3 +36,7 @@ declare_test!{BlockchainTests_Homestead_bcUncleHeaderValiditiy, "BlockchainTests declare_test!{BlockchainTests_Homestead_bcUncleTest, "BlockchainTests/Homestead/bcUncleTest"} declare_test!{BlockchainTests_Homestead_bcValidBlockTest, "BlockchainTests/Homestead/bcValidBlockTest"} declare_test!{BlockchainTests_Homestead_bcWalletTest, "BlockchainTests/Homestead/bcWalletTest"} +declare_test!{BlockchainTests_Homestead_bcShanghaiLove, "BlockchainTests/Homestead/bcShanghaiLove"} +// Uncomment once the test is correct. +// declare_test!{BlockchainTests_Homestead_bcSuicideIssue, "BlockchainTests/Homestead/bcSuicideIssue"} +declare_test!{BlockchainTests_Homestead_bcExploitTest, "BlockchainTests/Homestead/bcExploitTest"} diff --git a/ethcore/src/json_tests/state.rs b/ethcore/src/json_tests/state.rs index 28aaa62ec..d7011cd28 100644 --- a/ethcore/src/json_tests/state.rs +++ b/ethcore/src/json_tests/state.rs @@ -96,7 +96,6 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec { declare_test!{StateTests_stBlockHashTest, "StateTests/stBlockHashTest"} declare_test!{StateTests_stCallCodes, "StateTests/stCallCodes"} declare_test!{StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} -declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"} declare_test!{StateTests_stExample, "StateTests/stExample"} declare_test!{StateTests_stInitCodeTest, "StateTests/stInitCodeTest"} declare_test!{StateTests_stLogTests, "StateTests/stLogTests"} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 9231f4208..2e71cbcf4 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -99,6 +99,8 @@ extern crate ethcore_devtools as devtools; extern crate rand; extern crate bit_set; extern crate rlp; +extern crate ethcore_bloom_journal as bloom_journal; +extern crate byteorder; #[macro_use] extern crate log; @@ -110,6 +112,7 @@ extern crate lazy_static; extern crate heapsize; 
#[macro_use] extern crate ethcore_ipc as ipc; +extern crate lru_cache; #[cfg(feature = "jit" )] extern crate evmjit; @@ -119,7 +122,6 @@ pub extern crate ethstore; pub mod account_provider; pub mod engines; pub mod block; -pub mod block_queue; pub mod client; pub mod error; pub mod ethereum; @@ -143,6 +145,7 @@ mod basic_types; mod env_info; mod pod_account; mod state; +mod state_db; mod account_db; mod builtin; mod executive; diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs index 5c0c6f420..7ccafac74 100644 --- a/ethcore/src/migrations/mod.rs +++ b/ethcore/src/migrations/mod.rs @@ -23,3 +23,6 @@ pub mod extras; mod v9; pub use self::v9::ToV9; pub use self::v9::Extract; + +mod v10; +pub use self::v10::ToV10; diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs index 9327decef..9af75a8ed 100644 --- a/ethcore/src/migrations/state/v7.rs +++ b/ethcore/src/migrations/state/v7.rs @@ -24,6 +24,7 @@ use util::{Address, FixedHash, H256}; use util::kvdb::Database; use util::migration::{Batch, Config, Error, Migration, SimpleMigration, Progress}; use util::sha3::Hashable; +use std::sync::Arc; use rlp::{decode, Rlp, RlpStream, Stream, View}; @@ -107,7 +108,7 @@ pub struct OverlayRecentV7 { impl OverlayRecentV7 { // walk all journal entries in the database backwards. // find migrations for any possible inserted keys. - fn walk_journal(&mut self, source: &Database) -> Result<(), Error> { + fn walk_journal(&mut self, source: Arc) -> Result<(), Error> { if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) { let mut era = decode::(&val); loop { @@ -151,7 +152,7 @@ impl OverlayRecentV7 { // walk all journal entries in the database backwards. // replace all possible inserted/deleted keys with their migrated counterparts // and commit the altered entries. 
- fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> { + fn migrate_journal(&self, source: Arc, mut batch: Batch, dest: &mut Database) -> Result<(), Error> { if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) { try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest)); @@ -228,7 +229,7 @@ impl Migration for OverlayRecentV7 { // walk all records in the database, attempting to migrate any possible and // keeping records of those that we do. then migrate the journal using // this information. - fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { let mut batch = Batch::new(config, col); // check version metadata. @@ -257,7 +258,7 @@ impl Migration for OverlayRecentV7 { try!(batch.insert(key, value.into_vec(), dest)); } - try!(self.walk_journal(source)); + try!(self.walk_journal(source.clone())); self.migrate_journal(source, batch, dest) } } diff --git a/ethcore/src/migrations/v10.rs b/ethcore/src/migrations/v10.rs new file mode 100644 index 000000000..88884fb26 --- /dev/null +++ b/ethcore/src/migrations/v10.rs @@ -0,0 +1,117 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! 
Bloom upgrade + +use std::sync::Arc; +use db::{COL_EXTRA, COL_HEADERS, COL_STATE}; +use state_db::{ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET, StateDB}; +use util::trie::TrieDB; +use views::HeaderView; +use bloom_journal::Bloom; +use util::migration::{Error, Migration, Progress, Batch, Config}; +use util::journaldb; +use util::{H256, FixedHash, Trie}; +use util::{Database, DBTransaction}; + +/// Account bloom upgrade routine. If bloom already present, does nothing. +/// If database empty (no best block), does nothing. +/// Can be called on upgraded database with no issues (will do nothing). +pub fn generate_bloom(source: Arc, dest: &mut Database) -> Result<(), Error> { + trace!(target: "migration", "Account bloom upgrade started"); + let best_block_hash = match try!(source.get(COL_EXTRA, b"best")) { + // no migration needed + None => { + trace!(target: "migration", "No best block hash, skipping"); + return Ok(()); + }, + Some(hash) => hash, + }; + let best_block_header = match try!(source.get(COL_HEADERS, &best_block_hash)) { + // no best block, nothing to do + None => { + trace!(target: "migration", "No best block header, skipping"); + return Ok(()) + }, + Some(x) => x, + }; + let state_root = HeaderView::new(&best_block_header).state_root(); + + trace!("Adding accounts bloom (one-time upgrade)"); + let bloom_journal = { + let mut bloom = Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET); + // no difference what algorithm is passed, since there will be no writes + let state_db = journaldb::new( + source.clone(), + journaldb::Algorithm::OverlayRecent, + COL_STATE); + let account_trie = try!(TrieDB::new(state_db.as_hashdb(), &state_root).map_err(|e| Error::Custom(format!("Cannot open trie: {:?}", e)))); + for item in try!(account_trie.iter().map_err(|_| Error::MigrationImpossible)) { + let (ref account_key, _) = try!(item.map_err(|_| Error::MigrationImpossible)); + let account_key_hash = H256::from_slice(&account_key); + bloom.set(&*account_key_hash); + } + + 
bloom.drain_journal() + }; + + trace!(target: "migration", "Generated {} bloom updates", bloom_journal.entries.len()); + + let mut batch = DBTransaction::new(dest); + try!(StateDB::commit_bloom(&mut batch, bloom_journal).map_err(|_| Error::Custom("Failed to commit bloom".to_owned()))); + try!(dest.write(batch)); + + trace!(target: "migration", "Finished bloom update"); + + + Ok(()) +} + +/// Account bloom migration. +#[derive(Default)] +pub struct ToV10 { + progress: Progress, +} + +impl ToV10 { + /// New v10 migration + pub fn new() -> ToV10 { ToV10 { progress: Progress::default() } } +} + +impl Migration for ToV10 { + fn version(&self) -> u32 { + 10 + } + + fn pre_columns(&self) -> Option { Some(5) } + + fn columns(&self) -> Option { Some(6) } + + fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + let mut batch = Batch::new(config, col); + for (key, value) in source.iter(col) { + self.progress.tick(); + try!(batch.insert(key.to_vec(), value.to_vec(), dest)); + } + try!(batch.commit(dest)); + + if col == COL_STATE { + try!(generate_bloom(source, dest)); + } + + Ok(()) + } +} diff --git a/ethcore/src/migrations/v9.rs b/ethcore/src/migrations/v9.rs index d4070d0c0..83729dc55 100644 --- a/ethcore/src/migrations/v9.rs +++ b/ethcore/src/migrations/v9.rs @@ -20,6 +20,7 @@ use rlp::{Rlp, RlpStream, View, Stream}; use util::kvdb::Database; use util::migration::{Batch, Config, Error, Migration, Progress}; +use std::sync::Arc; /// Which part of block to preserve pub enum Extract { @@ -55,7 +56,7 @@ impl Migration for ToV9 { fn version(&self) -> u32 { 9 } - fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { let mut batch = Batch::new(config, self.column); for (key, value) in source.iter(col) { diff --git a/ethcore/src/miner/miner.rs 
b/ethcore/src/miner/miner.rs index 8a6e607cc..996e25871 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -48,6 +48,17 @@ pub enum PendingSet { SealingOrElseQueue, } +/// Type of the gas limit to apply to the transaction queue. +#[derive(Debug, PartialEq)] +pub enum GasLimit { + /// Depends on the block gas limit and is updated with every block. + Auto, + /// No limit. + None, + /// Set to a fixed gas value. + Fixed(U256), +} + /// Configures the behaviour of the miner. #[derive(Debug, PartialEq)] pub struct MinerOptions { @@ -71,6 +82,8 @@ pub struct MinerOptions { pub work_queue_size: usize, /// Can we submit two different solutions for the same block and expect both to result in an import? pub enable_resubmission: bool, + /// Global gas limit for all transaction in the queue except for local and retracted. + pub tx_queue_gas_limit: GasLimit, } impl Default for MinerOptions { @@ -81,11 +94,12 @@ impl Default for MinerOptions { reseal_on_external_tx: false, reseal_on_own_tx: true, tx_gas_limit: !U256::zero(), - tx_queue_size: 1024, + tx_queue_size: 2048, pending_set: PendingSet::AlwaysQueue, reseal_min_period: Duration::from_secs(2), work_queue_size: 20, enable_resubmission: true, + tx_queue_gas_limit: GasLimit::Auto, } } } @@ -194,7 +208,11 @@ impl Miner { true => None, false => Some(WorkPoster::new(&options.new_work_notify)) }; - let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit))); + let gas_limit = match options.tx_queue_gas_limit { + GasLimit::Fixed(ref limit) => *limit, + _ => !U256::zero(), + }; + let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, gas_limit, options.tx_gas_limit))); Miner { transaction_queue: txq, next_allowed_reseal: Mutex::new(Instant::now()), @@ -449,6 +467,10 @@ impl Miner { let gas_limit = HeaderView::new(&chain.best_block_header()).gas_limit(); let mut queue = self.transaction_queue.lock(); queue.set_gas_limit(gas_limit); + 
if let GasLimit::Auto = self.options.tx_queue_gas_limit { + // Set total tx queue gas limit to be 2x the block gas limit. + queue.set_total_gas_limit(gas_limit << 1); + } } /// Returns true if we had to prepare new pending block. @@ -499,6 +521,21 @@ impl Miner { /// Are we allowed to do a non-mandatory reseal? fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock() } + + fn from_pending_block(&self, latest_block_number: BlockNumber, from_chain: F, map_block: G) -> H + where F: Fn() -> H, G: Fn(&ClosedBlock) -> H { + let sealing_work = self.sealing_work.lock(); + sealing_work.queue.peek_last_ref().map_or_else( + || from_chain(), + |b| { + if b.block().header().number() > latest_block_number { + map_block(b) + } else { + from_chain() + } + } + ) + } } const SEALING_TIMEOUT_IN_BLOCKS : u64 = 5; @@ -571,29 +608,35 @@ impl MinerService for Miner { } fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { - let sealing_work = self.sealing_work.lock(); - sealing_work.queue.peek_last_ref().map_or_else( + self.from_pending_block( + chain.chain_info().best_block_number, || chain.latest_balance(address), |b| b.block().fields().state.balance(address) ) } fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> H256 { - let sealing_work = self.sealing_work.lock(); - sealing_work.queue.peek_last_ref().map_or_else( + self.from_pending_block( + chain.chain_info().best_block_number, || chain.latest_storage_at(address, position), |b| b.block().fields().state.storage_at(address, position) ) } fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> U256 { - let sealing_work = self.sealing_work.lock(); - sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_nonce(address), |b| b.block().fields().state.nonce(address)) + self.from_pending_block( + chain.chain_info().best_block_number, + || chain.latest_nonce(address), + |b| b.block().fields().state.nonce(address) + ) } fn 
code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { - let sealing_work = self.sealing_work.lock(); - sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_code(address), |b| b.block().fields().state.code(address)) + self.from_pending_block( + chain.chain_info().best_block_number, + || chain.latest_code(address), + |b| b.block().fields().state.code(address).map(|c| (*c).clone()) + ) } fn set_author(&self, author: Address) { @@ -743,50 +786,74 @@ impl MinerService for Miner { queue.top_transactions() } - fn pending_transactions(&self) -> Vec { + fn pending_transactions(&self, best_block: BlockNumber) -> Vec { let queue = self.transaction_queue.lock(); - let sw = self.sealing_work.lock(); - // TODO: should only use the sealing_work when it's current (it could be an old block) - let sealing_set = match sw.enabled { - true => sw.queue.peek_last_ref(), - false => None, - }; - match (&self.options.pending_set, sealing_set) { - (&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.top_transactions(), - (_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().to_owned()), + match self.options.pending_set { + PendingSet::AlwaysQueue => queue.top_transactions(), + PendingSet::SealingOrElseQueue => { + self.from_pending_block( + best_block, + || queue.top_transactions(), + |sealing| sealing.transactions().to_owned() + ) + }, + PendingSet::AlwaysSealing => { + self.from_pending_block( + best_block, + || vec![], + |sealing| sealing.transactions().to_owned() + ) + }, } } - fn pending_transactions_hashes(&self) -> Vec { + fn pending_transactions_hashes(&self, best_block: BlockNumber) -> Vec { let queue = self.transaction_queue.lock(); - let sw = self.sealing_work.lock(); - let sealing_set = match sw.enabled { - true => sw.queue.peek_last_ref(), - false => None, - }; - match (&self.options.pending_set, sealing_set) { - (&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => 
queue.pending_hashes(), - (_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().iter().map(|t| t.hash()).collect()), + match self.options.pending_set { + PendingSet::AlwaysQueue => queue.pending_hashes(), + PendingSet::SealingOrElseQueue => { + self.from_pending_block( + best_block, + || queue.pending_hashes(), + |sealing| sealing.transactions().iter().map(|t| t.hash()).collect() + ) + }, + PendingSet::AlwaysSealing => { + self.from_pending_block( + best_block, + || vec![], + |sealing| sealing.transactions().iter().map(|t| t.hash()).collect() + ) + }, } } - fn transaction(&self, hash: &H256) -> Option { + fn transaction(&self, best_block: BlockNumber, hash: &H256) -> Option { let queue = self.transaction_queue.lock(); - let sw = self.sealing_work.lock(); - let sealing_set = match sw.enabled { - true => sw.queue.peek_last_ref(), - false => None, - }; - match (&self.options.pending_set, sealing_set) { - (&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.find(hash), - (_, sealing) => sealing.and_then(|s| s.transactions().iter().find(|t| &t.hash() == hash).cloned()), + match self.options.pending_set { + PendingSet::AlwaysQueue => queue.find(hash), + PendingSet::SealingOrElseQueue => { + self.from_pending_block( + best_block, + || queue.find(hash), + |sealing| sealing.transactions().iter().find(|t| &t.hash() == hash).cloned() + ) + }, + PendingSet::AlwaysSealing => { + self.from_pending_block( + best_block, + || None, + |sealing| sealing.transactions().iter().find(|t| &t.hash() == hash).cloned() + ) + }, } } - fn pending_receipt(&self, hash: &H256) -> Option { - let sealing_work = self.sealing_work.lock(); - match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) { - (true, Some(pending)) => { + fn pending_receipt(&self, best_block: BlockNumber, hash: &H256) -> Option { + self.from_pending_block( + best_block, + || None, + |pending| { let txs = pending.transactions(); txs.iter() .map(|t| t.hash()) @@ -807,15 +874,15 @@ 
impl MinerService for Miner { logs: receipt.logs.clone(), } }) - }, - _ => None - } + } + ) } - fn pending_receipts(&self) -> BTreeMap { - let sealing_work = self.sealing_work.lock(); - match (sealing_work.enabled, sealing_work.queue.peek_last_ref()) { - (true, Some(pending)) => { + fn pending_receipts(&self, best_block: BlockNumber) -> BTreeMap { + self.from_pending_block( + best_block, + || BTreeMap::new(), + |pending| { let hashes = pending.transactions() .iter() .map(|t| t.hash()); @@ -823,9 +890,8 @@ impl MinerService for Miner { let receipts = pending.receipts().iter().cloned(); hashes.zip(receipts).collect() - }, - _ => BTreeMap::new() - } + } + ) } fn last_nonce(&self, address: &Address) -> Option { @@ -870,15 +936,24 @@ impl MinerService for Miner { } fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let result = if let Some(b) = self.sealing_work.lock().queue.get_used_if(if self.options.enable_resubmission { GetAction::Clone } else { GetAction::Take }, |b| &b.hash() == &pow_hash) { - b.lock().try_seal(&*self.engine, seal).or_else(|_| { - warn!(target: "miner", "Mined solution rejected: Invalid."); - Err(Error::PowInvalid) - }) - } else { - warn!(target: "miner", "Mined solution rejected: Block unknown or out of date."); - Err(Error::PowHashInvalid) - }; + let result = + if let Some(b) = self.sealing_work.lock().queue.get_used_if( + if self.options.enable_resubmission { + GetAction::Clone + } else { + GetAction::Take + }, + |b| &b.hash() == &pow_hash + ) { + trace!(target: "miner", "Sealing block {}={}={} with seal {:?}", pow_hash, b.hash(), b.header().bare_hash(), seal); + b.lock().try_seal(&*self.engine, seal).or_else(|(e, _)| { + warn!(target: "miner", "Mined solution rejected: {}", e); + Err(Error::PowInvalid) + }) + } else { + warn!(target: "miner", "Mined solution rejected: Block unknown or out of date."); + Err(Error::PowHashInvalid) + }; result.and_then(|sealed| { let n = 
sealed.header().number(); let h = sealed.header().hash(); @@ -921,7 +996,7 @@ impl MinerService for Miner { out_of_chain.for_each(|txs| { let mut transaction_queue = self.transaction_queue.lock(); let _ = self.add_transactions_to_queue( - chain, txs, TransactionOrigin::External, &mut transaction_queue + chain, txs, TransactionOrigin::RetractedBlock, &mut transaction_queue ); }); } @@ -1013,6 +1088,7 @@ mod tests { reseal_min_period: Duration::from_secs(5), tx_gas_limit: !U256::zero(), tx_queue_size: 1024, + tx_queue_gas_limit: GasLimit::None, pending_set: PendingSet::AlwaysSealing, work_queue_size: 5, enable_resubmission: true, @@ -1041,34 +1117,54 @@ mod tests { let client = TestBlockChainClient::default(); let miner = miner(); let transaction = transaction(); + let best_block = 0; // when let res = miner.import_own_transaction(&client, transaction); // then assert_eq!(res.unwrap(), TransactionImportResult::Current); assert_eq!(miner.all_transactions().len(), 1); - assert_eq!(miner.pending_transactions().len(), 1); - assert_eq!(miner.pending_transactions_hashes().len(), 1); - assert_eq!(miner.pending_receipts().len(), 1); + assert_eq!(miner.pending_transactions(best_block).len(), 1); + assert_eq!(miner.pending_transactions_hashes(best_block).len(), 1); + assert_eq!(miner.pending_receipts(best_block).len(), 1); // This method will let us know if pending block was created (before calling that method) assert!(!miner.prepare_work_sealing(&client)); } + #[test] + fn should_not_use_pending_block_if_best_block_is_higher() { + // given + let client = TestBlockChainClient::default(); + let miner = miner(); + let transaction = transaction(); + let best_block = 10; + // when + let res = miner.import_own_transaction(&client, transaction); + + // then + assert_eq!(res.unwrap(), TransactionImportResult::Current); + assert_eq!(miner.all_transactions().len(), 1); + assert_eq!(miner.pending_transactions(best_block).len(), 0); + 
assert_eq!(miner.pending_transactions_hashes(best_block).len(), 0); + assert_eq!(miner.pending_receipts(best_block).len(), 0); + } + #[test] fn should_import_external_transaction() { // given let client = TestBlockChainClient::default(); let miner = miner(); let transaction = transaction(); + let best_block = 0; // when let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap(); // then assert_eq!(res.unwrap(), TransactionImportResult::Current); assert_eq!(miner.all_transactions().len(), 1); - assert_eq!(miner.pending_transactions_hashes().len(), 0); - assert_eq!(miner.pending_transactions().len(), 0); - assert_eq!(miner.pending_receipts().len(), 0); + assert_eq!(miner.pending_transactions_hashes(best_block).len(), 0); + assert_eq!(miner.pending_transactions(best_block).len(), 0); + assert_eq!(miner.pending_receipts(best_block).len(), 0); // This method will let us know if pending block was created (before calling that method) assert!(miner.prepare_work_sealing(&client)); } diff --git a/ethcore/src/miner/mod.rs b/ethcore/src/miner/mod.rs index e95ce758a..8dfddf483 100644 --- a/ethcore/src/miner/mod.rs +++ b/ethcore/src/miner/mod.rs @@ -48,7 +48,7 @@ mod work_notify; mod price_info; pub use self::transaction_queue::{TransactionQueue, AccountDetails, TransactionOrigin}; -pub use self::miner::{Miner, MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions}; +pub use self::miner::{Miner, MinerOptions, PendingSet, GasPricer, GasPriceCalibratorOptions, GasLimit}; pub use self::external::{ExternalMiner, ExternalMinerService}; pub use client::TransactionImportResult; @@ -56,6 +56,7 @@ use std::collections::BTreeMap; use util::{H256, U256, Address, Bytes}; use client::{MiningBlockChainClient, Executed, CallAnalytics}; use block::ClosedBlock; +use header::BlockNumber; use receipt::{RichReceipt, Receipt}; use error::{Error, CallError}; use transaction::SignedTransaction; @@ -115,7 +116,7 @@ pub trait MinerService : Send + Sync { Result; /// 
Returns hashes of transactions currently in pending - fn pending_transactions_hashes(&self) -> Vec; + fn pending_transactions_hashes(&self, best_block: BlockNumber) -> Vec; /// Removes all transactions from the queue and restart mining operation. fn clear_and_reset(&self, chain: &MiningBlockChainClient); @@ -135,19 +136,19 @@ pub trait MinerService : Send + Sync { where F: FnOnce(&ClosedBlock) -> T, Self: Sized; /// Query pending transactions for hash. - fn transaction(&self, hash: &H256) -> Option; + fn transaction(&self, best_block: BlockNumber, hash: &H256) -> Option; /// Get a list of all transactions. fn all_transactions(&self) -> Vec; /// Get a list of all pending transactions. - fn pending_transactions(&self) -> Vec; + fn pending_transactions(&self, best_block: BlockNumber) -> Vec; /// Get a list of all pending receipts. - fn pending_receipts(&self) -> BTreeMap; + fn pending_receipts(&self, best_block: BlockNumber) -> BTreeMap; /// Get a particular reciept. - fn pending_receipt(&self, hash: &H256) -> Option; + fn pending_receipt(&self, best_block: BlockNumber, hash: &H256) -> Option; /// Returns highest transaction nonce for given address. 
fn last_nonce(&self, address: &Address) -> Option; diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 7db65eacb..fdb652780 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -98,6 +98,8 @@ pub enum TransactionOrigin { Local, /// External transaction received from network External, + /// Transactions from retracted blocks + RetractedBlock, } impl PartialOrd for TransactionOrigin { @@ -112,10 +114,11 @@ impl Ord for TransactionOrigin { return Ordering::Equal; } - if *self == TransactionOrigin::Local { - Ordering::Less - } else { - Ordering::Greater + match (*self, *other) { + (TransactionOrigin::RetractedBlock, _) => Ordering::Less, + (_, TransactionOrigin::RetractedBlock) => Ordering::Greater, + (TransactionOrigin::Local, _) => Ordering::Less, + _ => Ordering::Greater, } } } @@ -127,6 +130,8 @@ struct TransactionOrder { /// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5) /// High nonce_height = Low priority (processed later) nonce_height: U256, + /// Gas specified in the transaction. + gas: U256, /// Gas Price of the transaction. /// Low gas price = Low priority (processed later) gas_price: U256, @@ -143,6 +148,7 @@ impl TransactionOrder { fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { TransactionOrder { nonce_height: tx.nonce() - base_nonce, + gas: tx.transaction.gas.clone(), gas_price: tx.transaction.gas_price, hash: tx.hash(), origin: tx.origin, @@ -284,6 +290,7 @@ struct TransactionSet { by_address: Table, by_gas_price: GasPriceQueue, limit: usize, + gas_limit: U256, } impl TransactionSet { @@ -314,15 +321,20 @@ impl TransactionSet { /// It drops transactions from this set but also removes associated `VerifiedTransaction`. /// Returns addresses and lowest nonces of transactions removed because of limit. 
fn enforce_limit(&mut self, by_hash: &mut HashMap) -> Option> { - let len = self.by_priority.len(); - if len <= self.limit { - return None; - } - + let mut count = 0; + let mut gas: U256 = 0.into(); let to_drop : Vec<(Address, U256)> = { self.by_priority .iter() - .skip(self.limit) + .skip_while(|order| { + count = count + 1; + let r = gas.overflowing_add(order.gas); + if r.1 { return false } + gas = r.0; + // Own and retracted transactions are allowed to go above the gas limit, bot not above the count limit. + (gas <= self.gas_limit || order.origin == TransactionOrigin::Local || order.origin == TransactionOrigin::RetractedBlock) && + count <= self.limit + }) .map(|order| by_hash.get(&order.hash) .expect("All transactions in `self.by_priority` and `self.by_address` are kept in sync with `by_hash`.")) .map(|tx| (tx.sender(), tx.nonce())) @@ -429,16 +441,17 @@ impl Default for TransactionQueue { impl TransactionQueue { /// Creates new instance of this Queue pub fn new() -> Self { - Self::with_limits(1024, !U256::zero()) + Self::with_limits(1024, !U256::zero(), !U256::zero()) } /// Create new instance of this Queue with specified limits - pub fn with_limits(limit: usize, tx_gas_limit: U256) -> Self { + pub fn with_limits(limit: usize, gas_limit: U256, tx_gas_limit: U256) -> Self { let current = TransactionSet { by_priority: BTreeSet::new(), by_address: Table::new(), by_gas_price: Default::default(), limit: limit, + gas_limit: gas_limit, }; let future = TransactionSet { @@ -446,6 +459,7 @@ impl TransactionQueue { by_address: Table::new(), by_gas_price: Default::default(), limit: limit, + gas_limit: gas_limit, }; TransactionQueue { @@ -501,6 +515,13 @@ impl TransactionQueue { }; } + /// Sets new total gas limit. 
+ pub fn set_total_gas_limit(&mut self, gas_limit: U256) { + self.future.gas_limit = gas_limit; + self.current.gas_limit = gas_limit; + self.future.enforce_limit(&mut self.by_hash); + } + /// Set the new limit for the amount of gas any individual transaction may have. /// Any transaction already imported to the queue is not affected. pub fn set_tx_gas_limit(&mut self, limit: U256) { @@ -633,7 +654,7 @@ impl TransactionQueue { }; for k in nonces_from_sender { let order = self.future.drop(&sender, &k).unwrap(); - self.current.insert(sender, k, order.penalize()); + self.future.insert(sender, k, order.penalize()); } } @@ -711,7 +732,10 @@ impl TransactionQueue { let order = self.current.drop(sender, &k).expect("iterating over a collection that has been retrieved above; qed"); if k >= current_nonce { - self.future.insert(*sender, k, order.update_height(k, current_nonce)); + let order = order.update_height(k, current_nonce); + if let Some(old) = self.future.insert(*sender, k, order.clone()) { + Self::replace_orders(*sender, k, old, order, &mut self.future, &mut self.by_hash); + } } else { trace!(target: "txqueue", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce); self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`"); @@ -729,6 +753,15 @@ impl TransactionQueue { .collect() } + #[cfg(test)] + fn future_transactions(&self) -> Vec { + self.future.by_priority + .iter() + .map(|t| self.by_hash.get(&t.hash).expect("All transactions in `current` and `future` are always included in `by_hash`")) + .map(|t| t.transaction.clone()) + .collect() + } + /// Returns hashes of all transactions from current, ordered by priority. 
pub fn pending_hashes(&self) -> Vec { self.current.by_priority @@ -776,7 +809,9 @@ impl TransactionQueue { self.future.by_gas_price.remove(&order.gas_price, &order.hash); // Put to current let order = order.update_height(current_nonce, first_nonce); - self.current.insert(address, current_nonce, order); + if let Some(old) = self.current.insert(address, current_nonce, order.clone()) { + Self::replace_orders(address, current_nonce, old, order, &mut self.current, &mut self.by_hash); + } update_last_nonce_to = Some(current_nonce); current_nonce = current_nonce + U256::one(); } @@ -810,47 +845,59 @@ impl TransactionQueue { let nonce = tx.nonce(); let hash = tx.hash(); - let next_nonce = self.last_nonces - .get(&address) - .cloned() - .map_or(state_nonce, |n| n + U256::one()); + { + // Rough size sanity check + let gas = &tx.transaction.gas; + if U256::from(tx.transaction.data.len()) > *gas { + // Droping transaction + trace!(target: "txqueue", "Dropping oversized transaction: {:?} (gas: {} < size {})", hash, gas, tx.transaction.data.len()); + return Err(TransactionError::LimitReached); + } + } // The transaction might be old, let's check that. // This has to be the first test, otherwise calculating // nonce height would result in overflow. if nonce < state_nonce { // Droping transaction - trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce); + trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, state_nonce); return Err(TransactionError::Old); - } else if nonce > next_nonce { + } + + // Update nonces of transactions in future (remove old transactions) + self.update_future(&address, state_nonce); + // State nonce could be updated. Maybe there are some more items waiting in future? 
+ self.move_matching_future_to_current(address, state_nonce, state_nonce); + // Check the next expected nonce (might be updated by move above) + let next_nonce = self.last_nonces + .get(&address) + .cloned() + .map_or(state_nonce, |n| n + U256::one()); + + // Future transaction + if nonce > next_nonce { // We have a gap - put to future. - // Update nonces of transactions in future (remove old transactions) - self.update_future(&address, state_nonce); // Insert transaction (or replace old one with lower gas price) try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash))); - // Return an error if this transaction is not imported because of limit. - try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash))); + // Enforce limit in Future + let removed = self.future.enforce_limit(&mut self.by_hash); + // Return an error if this transaction was not imported because of limit. + try!(check_if_removed(&address, &nonce, removed)); debug!(target: "txqueue", "Importing transaction to future: {:?}", hash); debug!(target: "txqueue", "status: {:?}", self.status()); return Ok(TransactionImportResult::Future); } + + // We might have filled a gap - move some more transactions from future + self.move_matching_future_to_current(address, nonce, state_nonce); + self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce); + + // Replace transaction if any try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash))); // Keep track of highest nonce stored in current let new_max = self.last_nonces.get(&address).map_or(nonce, |n| cmp::max(nonce, *n)); self.last_nonces.insert(address, new_max); - // Update nonces of transactions in future - self.update_future(&address, state_nonce); - // Maybe there are some more items waiting in future? 
- self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce); - // There might be exactly the same transaction waiting in future - // same (sender, nonce), but above function would not move it. - if let Some(order) = self.future.drop(&address, &nonce) { - // Let's insert that transaction to current (if it has higher gas_price) - let future_tx = self.by_hash.remove(&order.hash).expect("All transactions in `future` are always in `by_hash`."); - // if transaction in `current` (then one we are importing) is replaced it means that it has to low gas_price - try!(check_too_cheap(!Self::replace_transaction(future_tx, state_nonce, &mut self.current, &mut self.by_hash))); - } // Also enforce the limit let removed = self.current.enforce_limit(&mut self.by_hash); @@ -895,24 +942,28 @@ impl TransactionQueue { if let Some(old) = set.insert(address, nonce, order.clone()) { - // There was already transaction in queue. Let's check which one should stay - let old_fee = old.gas_price; - let new_fee = order.gas_price; - if old_fee.cmp(&new_fee) == Ordering::Greater { - // Put back old transaction since it has greater priority (higher gas_price) - set.insert(address, nonce, old); - // and remove new one - by_hash.remove(&hash).expect("The hash has been just inserted and no other line is altering `by_hash`."); - false - } else { - // Make sure we remove old transaction entirely - by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`."); - true - } + Self::replace_orders(address, nonce, old, order, set, by_hash) } else { true } } + + fn replace_orders(address: Address, nonce: U256, old: TransactionOrder, order: TransactionOrder, set: &mut TransactionSet, by_hash: &mut HashMap) -> bool { + // There was already transaction in queue. 
Let's check which one should stay + let old_fee = old.gas_price; + let new_fee = order.gas_price; + if old_fee.cmp(&new_fee) == Ordering::Greater { + // Put back old transaction since it has greater priority (higher gas_price) + set.insert(address, nonce, old); + // and remove new one + by_hash.remove(&order.hash).expect("The hash has been just inserted and no other line is altering `by_hash`."); + false + } else { + // Make sure we remove old transaction entirely + by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`."); + true + } + } } fn check_too_cheap(is_in: bool) -> Result<(), TransactionError> { @@ -956,6 +1007,7 @@ mod test { } fn default_nonce() -> U256 { 123.into() } + fn default_gas_val() -> U256 { 100_000.into() } fn default_gas_price() -> U256 { 1.into() } fn new_unsigned_tx(nonce: U256, gas_price: U256) -> Transaction { @@ -963,7 +1015,7 @@ mod test { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), + gas: default_gas_val(), gas_price: gas_price, nonce: nonce } @@ -1014,10 +1066,21 @@ mod test { new_tx_pair_default(0.into(), 1.into()) } + #[test] + fn test_ordering() { + assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::External), Ordering::Less); + assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::Local), Ordering::Less); + assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::External), Ordering::Less); + + assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::Local), Ordering::Greater); + assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater); + assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater); + } + #[test] fn should_return_correct_nonces_when_dropped_because_of_limit() { // given - let mut txq = TransactionQueue::with_limits(2, !U256::zero()); + let mut txq = TransactionQueue::with_limits(2, 
!U256::zero(), !U256::zero()); let (tx1, tx2) = new_tx_pair(123.into(), 1.into(), 1.into(), 0.into()); let sender = tx1.sender().unwrap(); let nonce = tx1.nonce; @@ -1055,7 +1118,8 @@ mod test { by_priority: BTreeSet::new(), by_address: Table::new(), by_gas_price: Default::default(), - limit: 1 + limit: 1, + gas_limit: !U256::zero(), }; let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); let tx1 = VerifiedTransaction::new(tx1, TransactionOrigin::External).unwrap(); @@ -1095,7 +1159,8 @@ mod test { by_priority: BTreeSet::new(), by_address: Table::new(), by_gas_price: Default::default(), - limit: 1 + limit: 1, + gas_limit: !U256::zero(), }; // Create two transactions with same nonce // (same hash) @@ -1143,7 +1208,8 @@ mod test { by_priority: BTreeSet::new(), by_address: Table::new(), by_gas_price: Default::default(), - limit: 2 + limit: 2, + gas_limit: !U256::zero(), }; let tx = new_tx_default(); let tx1 = VerifiedTransaction::new(tx.clone(), TransactionOrigin::External).unwrap(); @@ -1160,7 +1226,8 @@ mod test { by_priority: BTreeSet::new(), by_address: Table::new(), by_gas_price: Default::default(), - limit: 1 + limit: 1, + gas_limit: !U256::zero(), }; assert_eq!(set.gas_price_entry_limit(), 0.into()); @@ -1196,6 +1263,31 @@ mod test { assert_eq!(txq.top_transactions()[0], tx2); } + #[test] + fn should_move_all_transactions_from_future() { + // given + let mut txq = TransactionQueue::new(); + let (tx, tx2) = new_tx_pair_default(1.into(), 1.into()); + let prev_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance: + !U256::zero() }; + + // First insert one transaction to future + let res = txq.add(tx.clone(), &prev_nonce, TransactionOrigin::External); + assert_eq!(res.unwrap(), TransactionImportResult::Future); + assert_eq!(txq.status().future, 1); + + // now import second transaction to current + let res = txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External); + + // then + 
assert_eq!(res.unwrap(), TransactionImportResult::Current); + assert_eq!(txq.status().pending, 2); + assert_eq!(txq.status().future, 0); + assert_eq!(txq.current.by_priority.len(), 2); + assert_eq!(txq.current.by_address.len(), 2); + assert_eq!(txq.top_transactions()[0], tx); + assert_eq!(txq.top_transactions()[1], tx2); + } #[test] fn should_import_tx() { @@ -1375,6 +1467,27 @@ mod test { assert_eq!(top.len(), 2); } + #[test] + fn should_prioritize_reimported_transactions_within_same_nonce_height() { + // given + let mut txq = TransactionQueue::new(); + let tx = new_tx_default(); + // the second one has same nonce but higher `gas_price` + let (_, tx2) = new_similar_tx_pair(); + + // when + // first insert local one with higher gas price + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); + // then the one with lower gas price, but from retracted block + txq.add(tx.clone(), &default_account_details, TransactionOrigin::RetractedBlock).unwrap(); + + // then + let top = txq.top_transactions(); + assert_eq!(top[0], tx); // retracted should be first + assert_eq!(top[1], tx2); + assert_eq!(top.len(), 2); + } + #[test] fn should_not_prioritize_local_transactions_with_different_nonce_height() { // given @@ -1392,6 +1505,36 @@ mod test { assert_eq!(top.len(), 2); } + #[test] + fn should_penalize_transactions_from_sender_in_future() { + // given + let prev_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance: !U256::zero() }; + let mut txq = TransactionQueue::new(); + // txa, txb - slightly bigger gas price to have consistent ordering + let (txa, txb) = new_tx_pair_default(1.into(), 0.into()); + let (tx1, tx2) = new_tx_pair_with_gas_price_increment(3.into()); + + // insert everything + txq.add(txa.clone(), &prev_nonce, TransactionOrigin::External).unwrap(); + txq.add(txb.clone(), &prev_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx1.clone(), &prev_nonce, 
TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &prev_nonce, TransactionOrigin::External).unwrap(); + + assert_eq!(txq.status().future, 4); + + // when + txq.penalize(&tx1.hash()); + + // then + let top = txq.future_transactions(); + assert_eq!(top[0], txa); + assert_eq!(top[1], txb); + assert_eq!(top[2], tx1); + assert_eq!(top[3], tx2); + assert_eq!(top.len(), 4); + } + + #[test] fn should_penalize_transactions_from_sender() { // given @@ -1580,7 +1723,7 @@ mod test { #[test] fn should_drop_old_transactions_when_hitting_the_limit() { // given - let mut txq = TransactionQueue::with_limits(1, !U256::zero()); + let mut txq = TransactionQueue::with_limits(1, !U256::zero(), !U256::zero()); let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); let sender = tx.sender().unwrap(); let nonce = tx.nonce; @@ -1601,7 +1744,7 @@ mod test { #[test] fn should_limit_future_transactions() { - let mut txq = TransactionQueue::with_limits(1, !U256::zero()); + let mut txq = TransactionQueue::with_limits(1, !U256::zero(), !U256::zero()); txq.current.set_limit(10); let (tx1, tx2) = new_tx_pair_default(4.into(), 1.into()); let (tx3, tx4) = new_tx_pair_default(4.into(), 2.into()); @@ -1618,6 +1761,30 @@ mod test { assert_eq!(txq.status().future, 1); } + #[test] + fn should_limit_by_gas() { + let mut txq = TransactionQueue::with_limits(100, default_gas_val() * U256::from(2), !U256::zero()); + let (tx1, tx2) = new_tx_pair_default(U256::from(1), U256::from(1)); + let (tx3, tx4) = new_tx_pair_default(U256::from(1), U256::from(2)); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).ok(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).ok(); + txq.add(tx3.clone(), &default_account_details, TransactionOrigin::External).ok(); + txq.add(tx4.clone(), &default_account_details, TransactionOrigin::External).ok(); + assert_eq!(txq.status().pending, 2); + } + + #[test] + fn should_keep_own_transactions_above_gas_limit() { + let mut 
txq = TransactionQueue::with_limits(100, default_gas_val() * U256::from(2), !U256::zero()); + let (tx1, tx2) = new_tx_pair_default(U256::from(1), U256::from(1)); + let (tx3, tx4) = new_tx_pair_default(U256::from(1), U256::from(2)); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); + txq.add(tx3.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); + txq.add(tx4.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); + assert_eq!(txq.status().pending, 4); + } + #[test] fn should_drop_transactions_with_old_nonces() { let mut txq = TransactionQueue::new(); @@ -1861,7 +2028,7 @@ mod test { #[test] fn should_keep_right_order_in_future() { // given - let mut txq = TransactionQueue::with_limits(1, !U256::zero()); + let mut txq = TransactionQueue::with_limits(1, !U256::zero(), !U256::zero()); let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); let prev_nonce = |a: &Address| AccountDetails { nonce: default_account_details(a).nonce - U256::one(), balance: default_account_details(a).balance }; diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 703d61742..a92e03ebc 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -48,7 +48,7 @@ impl PodAccount { PodAccount { balance: *acc.balance(), nonce: *acc.nonce(), - storage: acc.storage_overlay().iter().fold(BTreeMap::new(), |mut m, (k, &(_, ref v))| {m.insert(k.clone(), v.clone()); m}), + storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}), code: acc.code().map(|x| x.to_vec()), } } diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index c9c9b3f1b..1217b90ed 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -82,7 +82,13 @@ impl ClientService { } let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - db_config.cache_size = 
config.db_cache_size; + + // give all rocksdb cache to state column; everything else has its + // own caches. + if let Some(size) = config.db_cache_size { + db_config.set_cache(::db::COL_STATE, size); + } + db_config.compaction = config.db_compaction.compaction_profile(); db_config.wal = config.db_wal; @@ -92,7 +98,7 @@ impl ClientService { let snapshot_params = SnapServiceParams { engine: spec.engine.clone(), genesis_block: spec.genesis_block(), - db_config: db_config, + db_config: db_config.clone(), pruning: pruning, channel: io_service.channel(), snapshot_root: snapshot_path.into(), diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 8cfc4c96b..bc1faea3f 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -205,7 +205,7 @@ impl Account { #[cfg(test)] mod tests { use account_db::{AccountDB, AccountDBMut}; - use tests::helpers::get_temp_journal_db; + use tests::helpers::get_temp_state_db; use snapshot::tests::helpers::fill_storage; use util::sha3::{SHA3_EMPTY, SHA3_NULL_RLP}; @@ -218,8 +218,7 @@ mod tests { #[test] fn encoding_basic() { - let mut db = get_temp_journal_db(); - let mut db = &mut **db; + let mut db = get_temp_state_db(); let addr = Address::random(); let account = Account { @@ -239,8 +238,7 @@ mod tests { #[test] fn encoding_storage() { - let mut db = get_temp_journal_db(); - let mut db = &mut **db; + let mut db = get_temp_state_db(); let addr = Address::random(); let account = { @@ -265,8 +263,7 @@ mod tests { #[test] fn encoding_code() { - let mut db = get_temp_journal_db(); - let mut db = &mut **db; + let mut db = get_temp_state_db(); let addr1 = Address::random(); let addr2 = Address::random(); diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 2074f8174..a5e6b58bd 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -43,13 +43,15 @@ use self::account::Account; use self::block::AbridgedBlock; use self::io::SnapshotWriter; +use 
super::state_db::StateDB; + use crossbeam::{scope, ScopedJoinHandle}; use rand::{Rng, OsRng}; pub use self::error::Error; pub use self::service::{Service, DatabaseRestore}; -pub use self::traits::{SnapshotService, RemoteSnapshotService}; +pub use self::traits::SnapshotService; pub use self::watcher::Watcher; pub use types::snapshot_manifest::ManifestData; pub use types::restoration_status::RestorationStatus; @@ -65,6 +67,12 @@ mod watcher; #[cfg(test)] mod tests; +/// IPC interfaces +#[cfg(feature="ipc")] +pub mod remote { + pub use super::traits::RemoteSnapshotService; +} + mod traits { #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues include!(concat!(env!("OUT_DIR"), "/snapshot_service_trait.rs")); @@ -454,6 +462,10 @@ impl StateRebuilder { self.code_map.insert(code_hash, code); } + let backing = self.db.backing().clone(); + + // bloom has to be updated + let mut bloom = StateDB::load_bloom(&backing); // batch trie writes { @@ -464,12 +476,14 @@ impl StateRebuilder { }; for (hash, thin_rlp) in pairs { + bloom.set(&*hash); try!(account_trie.insert(&hash, &thin_rlp)); } } - let backing = self.db.backing().clone(); + let bloom_journal = bloom.drain_journal(); let mut batch = backing.transaction(); + try!(StateDB::commit_bloom(&mut batch, bloom_journal)); try!(self.db.inject(&mut batch)); try!(backing.write(batch).map_err(::util::UtilError::SimpleString)); trace!(target: "snapshot", "current state root: {:?}", self.state_root); diff --git a/ethcore/src/snapshot/snapshot_service_trait.rs b/ethcore/src/snapshot/snapshot_service_trait.rs index 7df90c943..65448090f 100644 --- a/ethcore/src/snapshot/snapshot_service_trait.rs +++ b/ethcore/src/snapshot/snapshot_service_trait.rs @@ -22,7 +22,6 @@ use ipc::IpcConfig; /// This handles: /// - restoration of snapshots to temporary databases. 
/// - responding to queries for snapshot manifests and chunks -#[derive(Ipc)] #[ipc(client_ident="RemoteSnapshotService")] pub trait SnapshotService : Sync + Send { /// Query the most recent manifest data. diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 732eda717..7456cbeb2 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -20,6 +20,7 @@ use common::*; use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound}; use pod_state::*; use account_db::*; +use state_db::StateDB; use super::genesis::Genesis; use super::seal::Generic as GenericSeal; use ethereum; @@ -36,6 +37,8 @@ pub struct CommonParams { pub maximum_extra_data_size: usize, /// Network id. pub network_id: U256, + /// Main subprotocol name. + pub subprotocol_name: String, /// Minimum gas limit. pub min_gas_limit: U256, /// Fork block to check. @@ -48,6 +51,7 @@ impl From for CommonParams { account_start_nonce: p.account_start_nonce.into(), maximum_extra_data_size: p.maximum_extra_data_size.into(), network_id: p.network_id.into(), + subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()), min_gas_limit: p.min_gas_limit.into(), fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None }, } @@ -156,6 +160,9 @@ impl Spec { /// Get the configured Network ID. pub fn network_id(&self) -> U256 { self.params.network_id } + /// Get the configured Network ID. + pub fn subprotocol_name(&self) -> String { self.params.subprotocol_name.clone() } + /// Get the configured network fork block. 
pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params.fork_block } @@ -169,7 +176,7 @@ impl Spec { header.set_transactions_root(self.transactions_root.clone()); header.set_uncles_hash(RlpStream::new_list(0).out().sha3()); header.set_extra_data(self.extra_data.clone()); - header.set_state_root(self.state_root().clone()); + header.set_state_root(self.state_root()); header.set_receipts_root(self.receipts_root.clone()); header.set_log_bloom(H2048::new().clone()); header.set_gas_used(self.gas_used.clone()); @@ -184,6 +191,7 @@ impl Spec { let r = Rlp::new(&seal); (0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect() }); + trace!(target: "spec", "Header hash is {}", header.hash()); header } @@ -227,19 +235,23 @@ impl Spec { } /// Ensure that the given state DB has the trie nodes in for the genesis state. - pub fn ensure_db_good(&self, db: &mut HashDB) -> Result> { - if !db.contains(&self.state_root()) { + pub fn ensure_db_good(&self, db: &mut StateDB) -> Result> { + if !db.as_hashdb().contains(&self.state_root()) { + trace!(target: "spec", "ensure_db_good: Fresh database? 
Cannot find state root {}", self.state_root()); let mut root = H256::new(); + { - let mut t = SecTrieDBMut::new(db, &mut root); + let mut t = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root); for (address, account) in self.genesis_state.get().iter() { try!(t.insert(&**address, &account.rlp())); } } + trace!(target: "spec", "ensure_db_good: Populated sec trie; root is {}", root); for (address, account) in self.genesis_state.get().iter() { - account.insert_additional(&mut AccountDBMut::new(db, address)); + db.note_account_bloom(address); + account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address)); } - assert!(db.contains(&self.state_root())); + assert!(db.as_hashdb().contains(&self.state_root())); Ok(true) } else { Ok(false) } } diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index cdd430290..79a9e8ef1 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -16,15 +16,18 @@ //! Single account in the system. -use std::collections::hash_map::Entry; use util::*; use pod_account::*; use rlp::*; +use lru_cache::LruCache; -use std::cell::{Ref, RefCell, Cell}; +use std::cell::{RefCell, Cell}; + +const STORAGE_CACHE_ITEMS: usize = 8192; /// Single account in the system. -#[derive(Clone)] +/// Keeps track of changes to the code and storage. +/// The changes are applied in `commit_storage` and `commit_code` pub struct Account { // Balance of the account. balance: U256, @@ -32,14 +35,20 @@ pub struct Account { nonce: U256, // Trie-backed storage. storage_root: H256, - // Overlay on trie-backed storage - tuple is (, ). - storage_overlay: RefCell>, - // Code hash of the account. If None, means that it's a contract whose code has not yet been set. - code_hash: Option, + // LRU Cache of the trie-backed storage. + // This is limited to `STORAGE_CACHE_ITEMS` recent queries + storage_cache: RefCell>, + // Modified storage. 
Accumulates changes to storage made in `set_storage` + // Takes precedence over `storage_cache`. + storage_changes: HashMap, + // Code hash of the account. + code_hash: H256, + // Size of the accoun code. + code_size: Option, // Code cache of the account. - code_cache: Bytes, - // Account is new or has been modified - filth: Filth, + code_cache: Arc, + // Account code new or has been modified. + code_filth: Filth, // Cached address hash. address_hash: Cell>, } @@ -52,24 +61,32 @@ impl Account { balance: balance, nonce: nonce, storage_root: SHA3_NULL_RLP, - storage_overlay: RefCell::new(storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()), - code_hash: Some(code.sha3()), - code_cache: code, - filth: Filth::Dirty, + storage_cache: Self::empty_storage_cache(), + storage_changes: storage, + code_hash: code.sha3(), + code_size: Some(code.len()), + code_cache: Arc::new(code), + code_filth: Filth::Dirty, address_hash: Cell::new(None), } } + fn empty_storage_cache() -> RefCell> { + RefCell::new(LruCache::new(STORAGE_CACHE_ITEMS)) + } + /// General constructor. pub fn from_pod(pod: PodAccount) -> Account { Account { balance: pod.balance, nonce: pod.nonce, storage_root: SHA3_NULL_RLP, - storage_overlay: RefCell::new(pod.storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()), - code_hash: pod.code.as_ref().map(|c| c.sha3()), - code_cache: pod.code.as_ref().map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c.clone()), - filth: Filth::Dirty, + storage_cache: Self::empty_storage_cache(), + storage_changes: pod.storage.into_iter().collect(), + code_hash: pod.code.as_ref().map_or(SHA3_EMPTY, |c| c.sha3()), + code_filth: Filth::Dirty, + code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())), + code_cache: Arc::new(pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! 
Assuming no code."); vec![] }, |c| c)), address_hash: Cell::new(None), } } @@ -80,10 +97,12 @@ impl Account { balance: balance, nonce: nonce, storage_root: SHA3_NULL_RLP, - storage_overlay: RefCell::new(HashMap::new()), - code_hash: Some(SHA3_EMPTY), - code_cache: vec![], - filth: Filth::Dirty, + storage_cache: Self::empty_storage_cache(), + storage_changes: HashMap::new(), + code_hash: SHA3_EMPTY, + code_cache: Arc::new(vec![]), + code_size: Some(0), + code_filth: Filth::Clean, address_hash: Cell::new(None), } } @@ -95,10 +114,12 @@ impl Account { nonce: r.val_at(0), balance: r.val_at(1), storage_root: r.val_at(2), - storage_overlay: RefCell::new(HashMap::new()), - code_hash: Some(r.val_at(3)), - code_cache: vec![], - filth: Filth::Clean, + storage_cache: Self::empty_storage_cache(), + storage_changes: HashMap::new(), + code_hash: r.val_at(3), + code_cache: Arc::new(vec![]), + code_size: None, + code_filth: Filth::Clean, address_hash: Cell::new(None), } } @@ -110,10 +131,12 @@ impl Account { balance: balance, nonce: nonce, storage_root: SHA3_NULL_RLP, - storage_overlay: RefCell::new(HashMap::new()), - code_hash: None, - code_cache: vec![], - filth: Filth::Dirty, + storage_cache: Self::empty_storage_cache(), + storage_changes: HashMap::new(), + code_hash: SHA3_EMPTY, + code_cache: Arc::new(vec![]), + code_size: None, + code_filth: Filth::Clean, address_hash: Cell::new(None), } } @@ -121,46 +144,52 @@ impl Account { /// Set this account's code to the given code. /// NOTE: Account should have been created with `new_contract()` pub fn init_code(&mut self, code: Bytes) { - assert!(self.code_hash.is_none()); - self.code_cache = code; - self.filth = Filth::Dirty; + self.code_hash = code.sha3(); + self.code_cache = Arc::new(code); + self.code_size = Some(self.code_cache.len()); + self.code_filth = Filth::Dirty; } /// Reset this account's code to the given code. 
pub fn reset_code(&mut self, code: Bytes) { - self.code_hash = None; self.init_code(code); } /// Set (and cache) the contents of the trie's storage at `key` to `value`. pub fn set_storage(&mut self, key: H256, value: H256) { - match self.storage_overlay.borrow_mut().entry(key) { - Entry::Occupied(ref mut entry) if entry.get().1 != value => { - entry.insert((Filth::Dirty, value)); - self.filth = Filth::Dirty; - }, - Entry::Vacant(entry) => { - entry.insert((Filth::Dirty, value)); - self.filth = Filth::Dirty; - }, - _ => (), - } + self.storage_changes.insert(key, value); } /// Get (and cache) the contents of the trie's storage at `key`. + /// Takes modifed storage into account. pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 { - self.storage_overlay.borrow_mut().entry(key.clone()).or_insert_with(||{ - let db = SecTrieDB::new(db, &self.storage_root) - .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ - SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \ - using it will not fail."); + if let Some(value) = self.cached_storage_at(key) { + return value; + } + let db = SecTrieDB::new(db, &self.storage_root) + .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ + SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \ + using it will not fail."); - let item: U256 = match db.get(key){ - Ok(x) => x.map_or_else(U256::zero, decode), - Err(e) => panic!("Encountered potential DB corruption: {}", e), - }; - (Filth::Clean, item.into()) - }).1.clone() + let item: U256 = match db.get(key){ + Ok(x) => x.map_or_else(U256::zero, decode), + Err(e) => panic!("Encountered potential DB corruption: {}", e), + }; + let value: H256 = item.into(); + self.storage_cache.borrow_mut().insert(key.clone(), value.clone()); + value + } + + /// Get cached storage value if any. 
Returns `None` if the + /// key is not in the cache. + pub fn cached_storage_at(&self, key: &H256) -> Option { + if let Some(value) = self.storage_changes.get(key) { + return Some(value.clone()) + } + if let Some(value) = self.storage_cache.borrow_mut().get_mut(key) { + return Some(value.clone()) + } + None } /// return the balance associated with this account. @@ -169,10 +198,9 @@ impl Account { /// return the nonce associated with this account. pub fn nonce(&self) -> &U256 { &self.nonce } - #[cfg(test)] /// return the code hash associated with this account. pub fn code_hash(&self) -> H256 { - self.code_hash.clone().unwrap_or(SHA3_EMPTY) + self.code_hash.clone() } /// return the code hash associated with this account. @@ -187,41 +215,35 @@ impl Account { /// returns the account's code. If `None` then the code cache isn't available - /// get someone who knows to call `note_code`. - pub fn code(&self) -> Option<&[u8]> { - match self.code_hash { - Some(c) if c == SHA3_EMPTY && self.code_cache.is_empty() => Some(&self.code_cache), - Some(_) if !self.code_cache.is_empty() => Some(&self.code_cache), - None => Some(&self.code_cache), - _ => None, + pub fn code(&self) -> Option> { + if self.code_hash != SHA3_EMPTY && self.code_cache.is_empty() { + return None; } + Some(self.code_cache.clone()) + } + + /// returns the account's code size. If `None` then the code cache or code size cache isn't available - + /// get someone who knows to call `note_code`. + pub fn code_size(&self) -> Option { + self.code_size.clone() } #[cfg(test)] /// Provide a byte array which hashes to the `code_hash`. returns the hash as a result. 
pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> { let h = code.sha3(); - match self.code_hash { - Some(ref i) if h == *i => { - self.code_cache = code; - Ok(()) - }, - _ => Err(h) + if self.code_hash == h { + self.code_cache = Arc::new(code); + self.code_size = Some(self.code_cache.len()); + Ok(()) + } else { + Err(h) } } /// Is `code_cache` valid; such that code is going to return Some? pub fn is_cached(&self) -> bool { - !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == Some(SHA3_EMPTY)) - } - - /// Is this a new or modified account? - pub fn is_dirty(&self) -> bool { - self.filth == Filth::Dirty - } - - /// Mark account as clean. - pub fn set_clean(&mut self) { - self.filth = Filth::Clean + !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == SHA3_EMPTY) } /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. @@ -229,83 +251,100 @@ impl Account { // TODO: fill out self.code_cache; trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.is_cached() || - match self.code_hash { - Some(ref h) => match db.get(h) { - Some(x) => { self.code_cache = x.to_vec(); true }, + match db.get(&self.code_hash) { + Some(x) => { + self.code_cache = Arc::new(x.to_vec()); + self.code_size = Some(x.len()); + true + }, + _ => { + warn!("Failed reverse get of {}", self.code_hash); + false + }, + } + } + + /// Provide a database to get `code_size`. Should not be called if it is a contract without code. 
+ pub fn cache_code_size(&mut self, db: &HashDB) -> bool { + // TODO: fill out self.code_cache; + trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); + self.code_size.is_some() || + if self.code_hash != SHA3_EMPTY { + match db.get(&self.code_hash) { + Some(x) => { + self.code_size = Some(x.len()); + true + }, _ => { - warn!("Failed reverse get of {}", h); + warn!("Failed reverse get of {}", self.code_hash); false }, - }, - _ => false, + } + } else { + false } } - #[cfg(test)] /// Determine whether there are any un-`commit()`-ed storage-setting operations. - pub fn storage_is_clean(&self) -> bool { self.storage_overlay.borrow().iter().find(|&(_, &(f, _))| f == Filth::Dirty).is_none() } + pub fn storage_is_clean(&self) -> bool { self.storage_changes.is_empty() } #[cfg(test)] /// return the storage root associated with this account or None if it has been altered via the overlay. pub fn storage_root(&self) -> Option<&H256> { if self.storage_is_clean() {Some(&self.storage_root)} else {None} } /// return the storage overlay. - pub fn storage_overlay(&self) -> Ref> { self.storage_overlay.borrow() } + pub fn storage_changes(&self) -> &HashMap { &self.storage_changes } /// Increment the nonce of the account by one. pub fn inc_nonce(&mut self) { self.nonce = self.nonce + U256::from(1u8); - self.filth = Filth::Dirty; } - /// Increment the nonce of the account by one. + /// Increase account balance. pub fn add_balance(&mut self, x: &U256) { - if !x.is_zero() { - self.balance = self.balance + *x; - self.filth = Filth::Dirty; - } + self.balance = self.balance + *x; } - /// Increment the nonce of the account by one. + /// Decrease account balance. 
/// Panics if balance is less than `x` pub fn sub_balance(&mut self, x: &U256) { - if !x.is_zero() { - assert!(self.balance >= *x); - self.balance = self.balance - *x; - self.filth = Filth::Dirty; - } + assert!(self.balance >= *x); + self.balance = self.balance - *x; } - /// Commit the `storage_overlay` to the backing DB and update `storage_root`. + /// Commit the `storage_changes` to the backing DB and update `storage_root`. pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) { let mut t = trie_factory.from_existing(db, &mut self.storage_root) .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \ using it will not fail."); - for (k, &mut (ref mut f, ref mut v)) in self.storage_overlay.borrow_mut().iter_mut() { - if f == &Filth::Dirty { - // cast key and value to trait type, - // so we can call overloaded `to_bytes` method - let res = match v.is_zero() { - true => t.remove(k), - false => t.insert(k, &encode(&U256::from(&*v))), - }; + for (k, v) in self.storage_changes.drain() { + // cast key and value to trait type, + // so we can call overloaded `to_bytes` method + let res = match v.is_zero() { + true => t.remove(&k), + false => t.insert(&k, &encode(&U256::from(&*v))), + }; - if let Err(e) = res { - warn!("Encountered potential DB corruption: {}", e); - } - *f = Filth::Clean; + if let Err(e) = res { + warn!("Encountered potential DB corruption: {}", e); } + self.storage_cache.borrow_mut().insert(k, v); } } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. 
pub fn commit_code(&mut self, db: &mut HashDB) { - trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_hash.is_none(), self.code_cache.is_empty()); - match (self.code_hash.is_none(), self.code_cache.is_empty()) { - (true, true) => self.code_hash = Some(SHA3_EMPTY), + trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_filth == Filth::Dirty, self.code_cache.is_empty()); + match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) { + (true, true) => { + self.code_size = Some(0); + self.code_filth = Filth::Clean; + }, (true, false) => { - self.code_hash = Some(db.insert(&self.code_cache)); + db.emplace(self.code_hash.clone(), (*self.code_cache).clone()); + self.code_size = Some(self.code_cache.len()); + self.code_filth = Filth::Clean; }, (false, _) => {}, } @@ -317,9 +356,59 @@ impl Account { stream.append(&self.nonce); stream.append(&self.balance); stream.append(&self.storage_root); - stream.append(self.code_hash.as_ref().expect("Cannot form RLP of contract account without code.")); + stream.append(&self.code_hash); stream.out() } + + /// Clone basic account data + pub fn clone_basic(&self) -> Account { + Account { + balance: self.balance.clone(), + nonce: self.nonce.clone(), + storage_root: self.storage_root.clone(), + storage_cache: Self::empty_storage_cache(), + storage_changes: HashMap::new(), + code_hash: self.code_hash.clone(), + code_size: self.code_size.clone(), + code_cache: self.code_cache.clone(), + code_filth: self.code_filth, + address_hash: self.address_hash.clone(), + } + } + + /// Clone account data and dirty storage keys + pub fn clone_dirty(&self) -> Account { + let mut account = self.clone_basic(); + account.storage_changes = self.storage_changes.clone(); + account.code_cache = self.code_cache.clone(); + account + } + + /// Clone account data, dirty storage keys and cached storage keys. 
+ pub fn clone_all(&self) -> Account { + let mut account = self.clone_dirty(); + account.storage_cache = self.storage_cache.clone(); + account + } + + /// Replace self with the data from other account merging storage cache. + /// Basic account data and all modifications are overwritten + /// with new values. + pub fn overwrite_with(&mut self, other: Account) { + self.balance = other.balance; + self.nonce = other.nonce; + self.storage_root = other.storage_root; + self.code_hash = other.code_hash; + self.code_filth = other.code_filth; + self.code_cache = other.code_cache; + self.code_size = other.code_size; + self.address_hash = other.address_hash; + let mut cache = self.storage_cache.borrow_mut(); + for (k, v) in other.storage_cache.into_inner().into_iter() { + cache.insert(k.clone() , v.clone()); //TODO: cloning should not be required here + } + self.storage_changes = other.storage_changes; + } } impl fmt::Debug for Account { @@ -415,7 +504,8 @@ mod tests { let mut db = MemoryDB::new(); let mut db = AccountDBMut::new(&mut db, &Address::new()); a.init_code(vec![0x55, 0x44, 0xffu8]); - assert_eq!(a.code_hash(), SHA3_EMPTY); + assert_eq!(a.code_filth, Filth::Dirty); + assert_eq!(a.code_size(), Some(3)); a.commit_code(&mut db); assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb"); } @@ -426,11 +516,12 @@ mod tests { let mut db = MemoryDB::new(); let mut db = AccountDBMut::new(&mut db, &Address::new()); a.init_code(vec![0x55, 0x44, 0xffu8]); - assert_eq!(a.code_hash(), SHA3_EMPTY); + assert_eq!(a.code_filth, Filth::Dirty); a.commit_code(&mut db); + assert_eq!(a.code_filth, Filth::Clean); assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb"); a.reset_code(vec![0x55]); - assert_eq!(a.code_hash(), SHA3_EMPTY); + assert_eq!(a.code_filth, Filth::Dirty); a.commit_code(&mut db); assert_eq!(a.code_hash().hex(), "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be"); } diff 
--git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 844662801..39c8bbc11 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use std::cell::{RefCell, RefMut}; +use std::collections::hash_map::Entry; use common::*; use engines::Engine; use executive::{Executive, TransactOptions}; @@ -23,6 +24,7 @@ use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; use types::state_diff::StateDiff; +use state_db::StateDB; mod account; mod substate; @@ -41,23 +43,166 @@ pub struct ApplyOutcome { /// Result type for the execution ("application") of a transaction. pub type ApplyResult = Result; +#[derive(Eq, PartialEq, Clone, Copy, Debug)] +/// Account modification state. Used to check if the account was +/// Modified in between commits and overall. +enum AccountState { + /// Account was loaded from disk and never modified in this state object. + CleanFresh, + /// Account was loaded from the global cache and never modified. + CleanCached, + /// Account has been modified and is not committed to the trie yet. + /// This is set if any of the account data is changed, including + /// storage and code. + Dirty, + /// Account was modified and committed to the trie. + Committed, +} + +#[derive(Debug)] +/// In-memory copy of the account data. Holds the optional account +/// and the modification status. +/// Account entry can contain existing (`Some`) or non-existing +/// account (`None`) +struct AccountEntry { + account: Option, + state: AccountState, +} + +// Account cache item. Contains account data and +// modification state +impl AccountEntry { + fn is_dirty(&self) -> bool { + self.state == AccountState::Dirty + } + + /// Clone dirty data into new `AccountEntry`. This includes + /// basic account data and modified storage keys. + /// Returns None if clean. 
+ fn clone_if_dirty(&self) -> Option { + match self.is_dirty() { + true => Some(self.clone_dirty()), + false => None, + } + } + + /// Clone dirty data into new `AccountEntry`. This includes + /// basic account data and modified storage keys. + fn clone_dirty(&self) -> AccountEntry { + AccountEntry { + account: self.account.as_ref().map(Account::clone_dirty), + state: self.state, + } + } + + // Create a new account entry and mark it as dirty. + fn new_dirty(account: Option) -> AccountEntry { + AccountEntry { + account: account, + state: AccountState::Dirty, + } + } + + // Create a new account entry and mark it as clean. + fn new_clean(account: Option) -> AccountEntry { + AccountEntry { + account: account, + state: AccountState::CleanFresh, + } + } + + // Create a new account entry and mark it as clean and cached. + fn new_clean_cached(account: Option) -> AccountEntry { + AccountEntry { + account: account, + state: AccountState::CleanCached, + } + } + + // Replace data with another entry but preserve storage cache. + fn overwrite_with(&mut self, other: AccountEntry) { + self.state = other.state; + match other.account { + Some(acc) => match self.account { + Some(ref mut ours) => { + ours.overwrite_with(acc); + }, + None => {}, + }, + None => self.account = None, + } + } +} + /// Representation of the entire state of all accounts in the system. +/// +/// `State` can work together with `StateDB` to share account cache. +/// +/// Local cache contains changes made locally and changes accumulated +/// locally from previous commits. Global cache reflects the database +/// state and never contains any changes. +/// +/// Cache items contains account data, or the flag that account does not exist +/// and modification state (see `AccountState`) +/// +/// Account data can be in the following cache states: +/// * In global but not local - something that was queried from the database, +/// but never modified +/// * In local but not global - something that was just added (e.g. 
new account) +/// * In both with the same value - something that was changed to a new value, +/// but changed back to a previous block in the same block (same State instance) +/// * In both with different values - something that was overwritten with a +/// new value. +/// +/// All read-only state queries check local cache/modifications first, +/// then global state cache. If data is not found in any of the caches +/// it is loaded from the DB to the local cache. +/// +/// **** IMPORTANT ************************************************************* +/// All the modifications to the account data must set the `Dirty` state in the +/// `AccountEntry`. This is done in `require` and `require_or_from`. So just +/// use that. +/// **************************************************************************** +/// +/// Upon destruction all the local cache data propagated into the global cache. +/// Propagated items might be rejected if current state is non-canonical. +/// +/// State snapshotting. +/// +/// A new snapshot can be created with `snapshot()`. Snapshots can be +/// created in a hierarchy. +/// When a snapshot is active all changes are applied directly into +/// `cache` and the original value is copied into an active snapshot. +/// Reverting a snapshot with `revert_to_snapshot` involves copying +/// original values from the latest snapshot back into `cache`. The code +/// takes care not to overwrite cached storage while doing that. +/// Snapshot can be discateded with `discard_snapshot`. All of the orignal +/// backed-up values are moved into a parent snapshot (if any). 
+/// pub struct State { - db: Box, + db: StateDB, root: H256, - cache: RefCell>>, - snapshots: RefCell>>>>, + cache: RefCell>, + // The original account is preserved in + snapshots: RefCell>>>, account_start_nonce: U256, factories: Factories, } +#[derive(Copy, Clone)] +enum RequireCache { + None, + CodeSize, + Code, +} + const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \ Therefore creating a SecTrieDB with this state's root will not fail."; impl State { /// Creates new state with empty state root - //#[cfg(test)] - pub fn new(mut db: Box, account_start_nonce: U256, factories: Factories) -> State { + #[cfg(test)] + pub fn new(mut db: StateDB, account_start_nonce: U256, factories: Factories) -> State { let mut root = H256::new(); { // init trie and reset root too null @@ -75,7 +220,7 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: Box, root: H256, account_start_nonce: U256, factories: Factories) -> Result { + pub fn from_existing(db: StateDB, root: H256, account_start_nonce: U256, factories: Factories) -> Result { if !db.as_hashdb().contains(&root) { return Err(TrieError::InvalidStateRoot(root)); } @@ -92,45 +237,72 @@ impl State { Ok(state) } - /// Create a recoverable snaphot of this state + /// Create a recoverable snaphot of this state. pub fn snapshot(&mut self) { self.snapshots.borrow_mut().push(HashMap::new()); } - /// Merge last snapshot with previous - pub fn clear_snapshot(&mut self) { + /// Merge last snapshot with previous. 
+ pub fn discard_snapshot(&mut self) { // merge with previous snapshot let last = self.snapshots.borrow_mut().pop(); if let Some(mut snapshot) = last { if let Some(ref mut prev) = self.snapshots.borrow_mut().last_mut() { - for (k, v) in snapshot.drain() { - prev.entry(k).or_insert(v); - } - } - } - } - - /// Revert to snapshot - pub fn revert_snapshot(&mut self) { - if let Some(mut snapshot) = self.snapshots.borrow_mut().pop() { - for (k, v) in snapshot.drain() { - match v { - Some(v) => { - self.cache.borrow_mut().insert(k, v); - }, - None => { - self.cache.borrow_mut().remove(&k); + if prev.is_empty() { + **prev = snapshot; + } else { + for (k, v) in snapshot.drain() { + prev.entry(k).or_insert(v); } } } } } - fn insert_cache(&self, address: &Address, account: Option) { - if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { - if !snapshot.contains_key(address) { - snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account)); - return; + /// Revert to the last snapshot and discard it. + pub fn revert_to_snapshot(&mut self) { + if let Some(mut snapshot) = self.snapshots.borrow_mut().pop() { + for (k, v) in snapshot.drain() { + match v { + Some(v) => { + match self.cache.borrow_mut().entry(k) { + Entry::Occupied(mut e) => { + // Merge snapshotted changes back into the main account + // storage preserving the cache. + e.get_mut().overwrite_with(v); + }, + Entry::Vacant(e) => { + e.insert(v); + } + } + }, + None => { + match self.cache.borrow_mut().entry(k) { + Entry::Occupied(e) => { + if e.get().is_dirty() { + e.remove(); + } + }, + _ => {} + } + } + } + } + } + } + + fn insert_cache(&self, address: &Address, account: AccountEntry) { + // Dirty account which is not in the cache means this is a new account. + // It goes directly into the snapshot as there's nothing to rever to. + // + // In all other cases account is read as clean first, and after that made + // dirty in and added to the snapshot with `note_cache`. 
+ if account.is_dirty() { + if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { + if !snapshot.contains_key(address) { + snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account)); + return; + } } } self.cache.borrow_mut().insert(address.clone(), account); @@ -139,13 +311,14 @@ impl State { fn note_cache(&self, address: &Address) { if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { if !snapshot.contains_key(address) { - snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned()); + snapshot.insert(address.clone(), self.cache.borrow().get(address).map(AccountEntry::clone_dirty)); } } } /// Destroy the current object and return root and database. - pub fn drop(self) -> (H256, Box) { + pub fn drop(mut self) -> (H256, StateDB) { + self.propagate_to_global_cache(); (self.root, self.db) } @@ -157,62 +330,120 @@ impl State { /// Create a new contract at address `contract`. If there is already an account at the address /// it will have its code reset, ready for `init_code()`. pub fn new_contract(&mut self, contract: &Address, balance: U256) { - self.insert_cache(contract, Some(Account::new_contract(balance, self.account_start_nonce))); + self.insert_cache(contract, AccountEntry::new_dirty(Some(Account::new_contract(balance, self.account_start_nonce)))); } /// Remove an existing account. pub fn kill_account(&mut self, account: &Address) { - self.insert_cache(account, None); + self.insert_cache(account, AccountEntry::new_dirty(None)); } /// Determine whether an account exists. pub fn exists(&self, a: &Address) -> bool { - self.ensure_cached(a, false, |a| a.is_some()) + self.ensure_cached(a, RequireCache::None, |a| a.is_some()) } /// Get the balance of account `a`. pub fn balance(&self, a: &Address) -> U256 { - self.ensure_cached(a, false, + self.ensure_cached(a, RequireCache::None, |a| a.as_ref().map_or(U256::zero(), |account| *account.balance())) } /// Get the nonce of account `a`. 
pub fn nonce(&self, a: &Address) -> U256 { - self.ensure_cached(a, false, + self.ensure_cached(a, RequireCache::None, |a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce())) } /// Mutate storage of account `address` so that it is `value` for `key`. pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { - self.ensure_cached(address, false, |a| a.as_ref().map_or(H256::new(), |a| { - let addr_hash = a.address_hash(address); - let db = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); - a.storage_at(db.as_hashdb(), key) - })) + // Storage key search and update works like this: + // 1. If there's an entry for the account in the local cache check for the key and return it if found. + // 2. If there's an entry for the account in the global cache check for the key or load it into that account. + // 3. If account is missing in the global cache load it into the local cache and cache the key there. + + // check local cache first without updating + { + let local_cache = self.cache.borrow_mut(); + let mut local_account = None; + if let Some(maybe_acc) = local_cache.get(address) { + match maybe_acc.account { + Some(ref account) => { + if let Some(value) = account.cached_storage_at(key) { + return value; + } else { + local_account = Some(maybe_acc); + } + }, + _ => return H256::new(), + } + } + // check the global cache and cache storage key there if found, + // otherwise cache the account locally and cache storage key there. 
+ if let Some(result) = self.db.get_cached(address, |acc| acc.map_or(H256::new(), |a| { + let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); + a.storage_at(account_db.as_hashdb(), key) + })) { + return result; + } + if let Some(ref mut acc) = local_account { + if let Some(ref account) = acc.account { + let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(address)); + return account.storage_at(account_db.as_hashdb(), key) + } else { + return H256::new() + } + } + } + + // check bloom before any requests to trie + if !self.db.check_account_bloom(address) { return H256::zero() } + + // account is not found in the global cache, get from the DB and insert into local + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); + let maybe_acc = match db.get(address) { + Ok(acc) => acc.map(Account::from_rlp), + Err(e) => panic!("Potential DB corruption encountered: {}", e), + }; + let r = maybe_acc.as_ref().map_or(H256::new(), |a| { + let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); + a.storage_at(account_db.as_hashdb(), key) + }); + self.insert_cache(address, AccountEntry::new_clean(maybe_acc)); + r } - /// Get the code of account `a`. - pub fn code(&self, a: &Address) -> Option { - self.ensure_cached(a, true, - |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec()))) + /// Get accounts' code. + pub fn code(&self, a: &Address) -> Option> { + self.ensure_cached(a, RequireCache::Code, + |a| a.as_ref().map_or(None, |a| a.code().clone())) } - /// Get the code size of account `a`. + pub fn code_hash(&self, a: &Address) -> H256 { + self.ensure_cached(a, RequireCache::None, + |a| a.as_ref().map_or(SHA3_EMPTY, |a| a.code_hash())) + } + + /// Get accounts' code size. 
pub fn code_size(&self, a: &Address) -> Option { - self.ensure_cached(a, true, - |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.len()))) + self.ensure_cached(a, RequireCache::CodeSize, + |a| a.as_ref().and_then(|a| a.code_size())) } /// Add `incr` to the balance of account `a`. pub fn add_balance(&mut self, a: &Address, incr: &U256) { trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)); - self.require(a, false).add_balance(incr); + if !incr.is_zero() || !self.exists(a) { + self.require(a, false).add_balance(incr); + } } /// Subtract `decr` from the balance of account `a`. pub fn sub_balance(&mut self, a: &Address, decr: &U256) { trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)); - self.require(a, false).sub_balance(decr); + if !decr.is_zero() || !self.exists(a) { + self.require(a, false).sub_balance(decr); + } } /// Subtracts `by` from the balance of `from` and adds it to that of `to`. @@ -228,7 +459,9 @@ impl State { /// Mutate storage of account `a` so that it is `value` for `key`. pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) { - self.require(a, false).set_storage(key, value) + if self.storage_at(a, &key) != value { + self.require(a, false).set_storage(key, value) + } } /// Initialise the code of account `a` so that it is `code`. @@ -262,19 +495,19 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. #[cfg_attr(feature="dev", allow(match_ref_pats))] - pub fn commit_into( + fn commit_into( factories: &Factories, - db: &mut HashDB, + db: &mut StateDB, root: &mut H256, - accounts: &mut HashMap> + accounts: &mut HashMap ) -> Result<(), Error> { // first, commit the sub trees. - // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? 
- for (address, ref mut a) in accounts.iter_mut() { - match a { - &mut&mut Some(ref mut account) if account.is_dirty() => { + for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) { + match a.account { + Some(ref mut account) => { + db.note_account_bloom(&address); let addr_hash = account.address_hash(address); - let mut account_db = factories.accountdb.create(db, addr_hash); + let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash); account.commit_storage(&factories.trie, account_db.as_hashdb_mut()); account.commit_code(account_db.as_hashdb_mut()); } @@ -283,15 +516,16 @@ impl State { } { - let mut trie = factories.trie.from_existing(db, root).unwrap(); - for (address, ref mut a) in accounts.iter_mut() { - match **a { - Some(ref mut account) if account.is_dirty() => { - account.set_clean(); - try!(trie.insert(address, &account.rlp())) + let mut trie = factories.trie.from_existing(db.as_hashdb_mut(), root).unwrap(); + for (address, ref mut a) in accounts.iter_mut().filter(|&(_, ref a)| a.is_dirty()) { + a.state = AccountState::Committed; + match a.account { + Some(ref mut account) => { + try!(trie.insert(address, &account.rlp())); + }, + None => { + try!(trie.remove(address)); }, - None => try!(trie.remove(address)), - _ => (), } } } @@ -299,10 +533,19 @@ impl State { Ok(()) } + /// Propagate local cache into shared canonical state cache. + fn propagate_to_global_cache(&mut self) { + let mut addresses = self.cache.borrow_mut(); + trace!("Committing cache {:?} entries", addresses.len()); + for (address, a) in addresses.drain().filter(|&(_, ref a)| a.state == AccountState::Committed || a.state == AccountState::CleanFresh) { + self.db.add_to_account_cache(address, a.account, a.state == AccountState::Committed); + } + } + /// Commits our cached account changes into the trie. 
pub fn commit(&mut self) -> Result<(), Error> { assert!(self.snapshots.borrow().is_empty()); - Self::commit_into(&self.factories, self.db.as_hashdb_mut(), &mut self.root, &mut *self.cache.borrow_mut()) + Self::commit_into(&self.factories, &mut self.db, &mut self.root, &mut *self.cache.borrow_mut()) } /// Clear state cache @@ -316,7 +559,8 @@ impl State { pub fn populate_from(&mut self, accounts: PodState) { assert!(self.snapshots.borrow().is_empty()); for (add, acc) in accounts.drain().into_iter() { - self.cache.borrow_mut().insert(add, Some(Account::from_pod(acc))); + self.db.note_account_bloom(&add); + self.cache.borrow_mut().insert(add, AccountEntry::new_dirty(Some(Account::from_pod(acc)))); } } @@ -326,7 +570,7 @@ impl State { // TODO: handle database rather than just the cache. // will need fat db. PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { - if let Some(ref acc) = *opt { + if let Some(ref acc) = opt.account { m.insert(add.clone(), PodAccount::from_account(acc)); } m @@ -335,7 +579,7 @@ impl State { fn query_pod(&mut self, query: &PodState) { for (address, pod_account) in query.get() { - self.ensure_cached(address, true, |a| { + self.ensure_cached(address, RequireCache::Code, |a| { if a.is_some() { for key in pod_account.storage.keys() { self.storage_at(address, key); @@ -354,28 +598,61 @@ impl State { pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post) } - /// Ensure account `a` is in our cache of the trie DB and return a handle for getting it. - /// `require_code` requires that the code be cached, too. 
- fn ensure_cached<'a, F, U>(&'a self, a: &'a Address, require_code: bool, f: F) -> U - where F: FnOnce(&Option) -> U { - let have_key = self.cache.borrow().contains_key(a); - if !have_key { - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let maybe_acc = match db.get(a) { - Ok(acc) => acc.map(Account::from_rlp), - Err(e) => panic!("Potential DB corruption encountered: {}", e), - }; - self.insert_cache(a, maybe_acc); - } - if require_code { - if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { - let addr_hash = account.address_hash(a); - let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); - account.cache_code(accountdb.as_hashdb()); + fn update_account_cache(require: RequireCache, account: &mut Account, db: &HashDB) { + match require { + RequireCache::None => {}, + RequireCache::Code => { + account.cache_code(db); + } + RequireCache::CodeSize => { + account.cache_code_size(db); } } + } - f(self.cache.borrow().get(a).unwrap()) + /// Check caches for required data + /// First searches for account in the local, then the shared cache. + /// Populates local cache if nothing found. 
+ fn ensure_cached(&self, a: &Address, require: RequireCache, f: F) -> U + where F: Fn(Option<&Account>) -> U { + // check local cache first + if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { + if let Some(ref mut account) = maybe_acc.account { + let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); + Self::update_account_cache(require, account, accountdb.as_hashdb()); + return f(Some(account)); + } + return f(None); + } + // check global cache + let result = self.db.get_cached(a, |mut acc| { + if let Some(ref mut account) = acc { + let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); + Self::update_account_cache(require, account, accountdb.as_hashdb()); + } + f(acc.map(|a| &*a)) + }); + match result { + Some(r) => r, + None => { + // first check bloom if it is not in database for sure + if !self.db.check_account_bloom(a) { return f(None); } + + // not found in the global cache, get from the DB and insert into local + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); + let mut maybe_acc = match db.get(a) { + Ok(acc) => acc.map(Account::from_rlp), + Err(e) => panic!("Potential DB corruption encountered: {}", e), + }; + if let Some(ref mut account) = maybe_acc.as_mut() { + let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); + Self::update_account_cache(require, account, accountdb.as_hashdb()); + } + let r = f(maybe_acc.as_ref()); + self.insert_cache(a, AccountEntry::new_clean(maybe_acc)); + r + } + } } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. 
@@ -390,30 +667,48 @@ impl State { { let contains_key = self.cache.borrow().contains_key(a); if !contains_key { - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let maybe_acc = match db.get(a) { - Ok(acc) => acc.map(Account::from_rlp), - Err(e) => panic!("Potential DB corruption encountered: {}", e), - }; - - self.insert_cache(a, maybe_acc); - } else { - self.note_cache(a); - } - - match self.cache.borrow_mut().get_mut(a).unwrap() { - &mut Some(ref mut acc) => not_default(acc), - slot @ &mut None => *slot = Some(default()), - } - - RefMut::map(self.cache.borrow_mut(), |c| { - let account = c.get_mut(a).unwrap().as_mut().unwrap(); - if require_code { - let addr_hash = account.address_hash(a); - let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); - account.cache_code(accountdb.as_hashdb()); + match self.db.get_cached_account(a) { + Some(acc) => self.insert_cache(a, AccountEntry::new_clean_cached(acc)), + None => { + let maybe_acc = if self.db.check_account_bloom(a) { + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); + let maybe_acc = match db.get(a) { + Ok(Some(acc)) => AccountEntry::new_clean(Some(Account::from_rlp(acc))), + Ok(None) => AccountEntry::new_clean(None), + Err(e) => panic!("Potential DB corruption encountered: {}", e), + }; + maybe_acc + } + else { + AccountEntry::new_clean(None) + }; + self.insert_cache(a, maybe_acc); + } + } + } + self.note_cache(a); + + match &mut self.cache.borrow_mut().get_mut(a).unwrap().account { + &mut Some(ref mut acc) => not_default(acc), + slot => *slot = Some(default()), + } + + // at this point the account is guaranteed to be in the cache. + RefMut::map(self.cache.borrow_mut(), |c| { + let mut entry = c.get_mut(a).unwrap(); + // set the dirty flag after changing account data. 
+ entry.state = AccountState::Dirty; + match entry.account { + Some(ref mut account) => { + if require_code { + let addr_hash = account.address_hash(a); + let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); + account.cache_code(accountdb.as_hashdb()); + } + account + }, + _ => panic!("Required account must always exist; qed"), } - account }) } } @@ -427,17 +722,10 @@ impl fmt::Debug for State { impl Clone for State { fn clone(&self) -> State { let cache = { - let mut cache = HashMap::new(); + let mut cache: HashMap = HashMap::new(); for (key, val) in self.cache.borrow().iter() { - let key = key.clone(); - match *val { - Some(ref acc) if acc.is_dirty() => { - cache.insert(key, Some(acc.clone())); - }, - None => { - cache.insert(key, None); - }, - _ => {}, + if let Some(entry) = val.clone_if_dirty() { + cache.insert(key.clone(), entry); } } cache @@ -447,7 +735,7 @@ impl Clone for State { db: self.db.boxed_clone(), root: self.root.clone(), cache: RefCell::new(cache), - snapshots: RefCell::new(self.snapshots.borrow().clone()), + snapshots: RefCell::new(Vec::new()), account_start_nonce: self.account_start_nonce.clone(), factories: self.factories.clone(), } @@ -457,6 +745,7 @@ impl Clone for State { #[cfg(test)] mod tests { +use std::sync::Arc; use std::str::FromStr; use rustc_serialize::hex::FromHex; use super::*; @@ -1321,14 +1610,14 @@ fn code_from_database() { let mut state = get_temp_state_in(temp.as_path()); state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}); state.init_code(&a, vec![1, 2, 3]); - assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec())); + assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); state.commit().unwrap(); - assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec())); + assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.code(&a), 
Some([1u8, 2, 3].to_vec())); + assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); } #[test] @@ -1476,12 +1765,12 @@ fn snapshot_basic() { state.snapshot(); state.add_balance(&a, &U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64)); - state.clear_snapshot(); + state.discard_snapshot(); assert_eq!(state.balance(&a), U256::from(69u64)); state.snapshot(); state.add_balance(&a, &U256::from(1u64)); assert_eq!(state.balance(&a), U256::from(70u64)); - state.revert_snapshot(); + state.revert_to_snapshot(); assert_eq!(state.balance(&a), U256::from(69u64)); } @@ -1494,9 +1783,9 @@ fn snapshot_nested() { state.snapshot(); state.add_balance(&a, &U256::from(69u64)); assert_eq!(state.balance(&a), U256::from(69u64)); - state.clear_snapshot(); + state.discard_snapshot(); assert_eq!(state.balance(&a), U256::from(69u64)); - state.revert_snapshot(); + state.revert_to_snapshot(); assert_eq!(state.balance(&a), U256::from(0)); } diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs new file mode 100644 index 000000000..04db274c4 --- /dev/null +++ b/ethcore/src/state_db.rs @@ -0,0 +1,480 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::collections::{VecDeque, HashSet}; +use lru_cache::LruCache; +use util::journaldb::JournalDB; +use util::hash::{H256}; +use util::hashdb::HashDB; +use state::Account; +use header::BlockNumber; +use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable}; +use bloom_journal::{Bloom, BloomJournal}; +use db::COL_ACCOUNT_BLOOM; +use byteorder::{LittleEndian, ByteOrder}; + +const STATE_CACHE_ITEMS: usize = 256000; +const STATE_CACHE_BLOCKS: usize = 8; + +pub const ACCOUNT_BLOOM_SPACE: usize = 1048576; +pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000; + +pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count"; + +/// Shared canonical state cache. +struct AccountCache { + /// DB Account cache. `None` indicates that account is known to be missing. + accounts: LruCache>, + /// Information on the modifications in recently committed blocks; specifically which addresses + /// changed in which block. Ordered by block number. + modifications: VecDeque, +} + +/// Buffered account cache item. +struct CacheQueueItem { + /// Account address. + address: Address, + /// Account data or `None` if account does not exist. + account: Option, + /// Indicates that the account was modified before being + /// added to the cache. + modified: bool, +} + +#[derive(Debug)] +/// Accumulates a list of accounts changed in a block. +struct BlockChanges { + /// Block number. + number: BlockNumber, + /// Block hash. + hash: H256, + /// Parent block hash. + parent: H256, + /// A set of modified account addresses. + accounts: HashSet
, + /// Block is part of the canonical chain. + is_canon: bool, +} + +/// State database abstraction. +/// Manages shared global state cache which reflects the canonical +/// state as it is on the disk. All the entries in the cache are clean. +/// A clone of `StateDB` may be created as canonical or not. +/// For canonical clones local cache is accumulated and applied +/// in `sync_cache` +/// For non-canonical clones local cache is dropped. +/// +/// Global cache propagation. +/// After a `State` object has been committed to the trie it +/// propagates its local cache into the `StateDB` local cache +/// using `add_to_account_cache` function. +/// Then, after the block has been added to the chain the local cache in the +/// `StateDB` is propagated into the global cache. +pub struct StateDB { + /// Backing database. + db: Box, + /// Shared canonical state cache. + account_cache: Arc>, + /// Local dirty cache. + local_cache: Vec, + /// Shared account bloom. Does not handle chain reorganizations. + account_bloom: Arc>, + /// Hash of the block on top of which this instance was created or + /// `None` if cache is disabled + parent_hash: Option, + /// Hash of the committing block or `None` if not committed yet. + commit_hash: Option, + /// Number of the committing block or `None` if not committed yet. 
+ commit_number: Option, +} + +impl StateDB { + /// Loads accounts bloom from the database + /// This bloom is used to handle requests for the non-existent account fast + pub fn load_bloom(db: &Database) -> Bloom { + let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY) + .expect("Low-level database error"); + + if hash_count_entry.is_none() { + return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET); + } + let hash_count_bytes = hash_count_entry.unwrap(); + assert_eq!(hash_count_bytes.len(), 1); + let hash_count = hash_count_bytes[0]; + + let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8]; + let mut key = [0u8; 8]; + for i in 0..ACCOUNT_BLOOM_SPACE / 8 { + LittleEndian::write_u64(&mut key, i as u64); + bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error") + .and_then(|val| Some(LittleEndian::read_u64(&val[..]))) + .unwrap_or(0u64); + } + + let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32); + trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count); + bloom + } + + /// Create a new instance wrapping `JournalDB` + pub fn new(db: Box) -> StateDB { + let bloom = Self::load_bloom(db.backing()); + StateDB { + db: db, + account_cache: Arc::new(Mutex::new(AccountCache { + accounts: LruCache::new(STATE_CACHE_ITEMS), + modifications: VecDeque::new(), + })), + local_cache: Vec::new(), + account_bloom: Arc::new(Mutex::new(bloom)), + parent_hash: None, + commit_hash: None, + commit_number: None, + } + } + + pub fn check_account_bloom(&self, address: &Address) -> bool { + trace!(target: "account_bloom", "Check account bloom: {:?}", address); + let bloom = self.account_bloom.lock(); + bloom.check(&*address.sha3()) + } + + pub fn note_account_bloom(&self, address: &Address) { + trace!(target: "account_bloom", "Note account bloom: {:?}", address); + let mut bloom = self.account_bloom.lock(); + bloom.set(&*address.sha3()); + } + + pub fn 
commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> { + assert!(journal.hash_functions <= 255); + batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]); + let mut key = [0u8; 8]; + let mut val = [0u8; 8]; + + for (bloom_part_index, bloom_part_value) in journal.entries { + LittleEndian::write_u64(&mut key, bloom_part_index as u64); + LittleEndian::write_u64(&mut val, bloom_part_value); + batch.put(COL_ACCOUNT_BLOOM, &key, &val); + } + Ok(()) + } + + /// Commit all recent insert operations and canonical historical commits' removals from the + /// old era to the backing database, reverting any non-canonical historical commit's inserts. + pub fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + { + let mut bloom_lock = self.account_bloom.lock(); + try!(Self::commit_bloom(batch, bloom_lock.drain_journal())); + } + let records = try!(self.db.commit(batch, now, id, end)); + self.commit_hash = Some(id.clone()); + self.commit_number = Some(now); + Ok(records) + } + + /// Propagate local cache into the global cache and synchronize + /// the global cache with the best block state. + /// This function updates the global cache by removing entries + /// that are invalidated by chain reorganization. `sync_cache` + /// should be called after the block has been committed and the + /// blockchain route has been calculated. + pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) { + trace!("sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", self.commit_number, self.commit_hash, self.parent_hash, is_best); + let mut cache = self.account_cache.lock(); + let mut cache = &mut *cache; + + // Purge changes from re-enacted and retracted blocks. + // Filter out committing block if any. 
+ let mut clear = false; + for block in enacted.iter().filter(|h| self.commit_hash.as_ref().map_or(true, |p| *h != p)) { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) { + trace!("Reverting enacted block {:?}", block); + m.is_canon = true; + for a in &m.accounts { + trace!("Reverting enacted address {:?}", a); + cache.accounts.remove(a); + } + false + } else { + true + } + }; + } + + for block in retracted { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|ref m| &m.hash == block) { + trace!("Retracting block {:?}", block); + m.is_canon = false; + for a in &m.accounts { + trace!("Retracted address {:?}", a); + cache.accounts.remove(a); + } + false + } else { + true + } + }; + } + if clear { + // We don't know anything about the block; clear everything + trace!("Wiping cache"); + cache.accounts.clear(); + cache.modifications.clear(); + } + + // Propagate cache only if committing on top of the latest canonical state + // blocks are ordered by number and only one block with a given number is marked as canonical + // (contributed to canonical state cache) + if let (Some(ref number), Some(ref hash), Some(ref parent)) = (self.commit_number, self.commit_hash, self.parent_hash) { + if cache.modifications.len() == STATE_CACHE_BLOCKS { + cache.modifications.pop_back(); + } + let mut modifications = HashSet::new(); + trace!("committing {} cache entries", self.local_cache.len()); + for account in self.local_cache.drain(..) { + if account.modified { + modifications.insert(account.address.clone()); + } + if is_best { + if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&account.address) { + if let Some(new) = account.account { + if account.modified { + existing.overwrite_with(new); + } + continue; + } + } + cache.accounts.insert(account.address, account.account); + } + } + + // Save modified accounts. These are ordered by the block number. 
+ let block_changes = BlockChanges { + accounts: modifications, + number: *number, + hash: hash.clone(), + is_canon: is_best, + parent: parent.clone(), + }; + let insert_at = cache.modifications.iter().enumerate().find(|&(_, ref m)| m.number < *number).map(|(i, _)| i); + trace!("inserting modifications at {:?}", insert_at); + if let Some(insert_at) = insert_at { + cache.modifications.insert(insert_at, block_changes); + } else { + cache.modifications.push_back(block_changes); + } + } + } + + /// Returns an interface to HashDB. + pub fn as_hashdb(&self) -> &HashDB { + self.db.as_hashdb() + } + + /// Returns an interface to mutable HashDB. + pub fn as_hashdb_mut(&mut self) -> &mut HashDB { + self.db.as_hashdb_mut() + } + + /// Clone the database. + pub fn boxed_clone(&self) -> StateDB { + StateDB { + db: self.db.boxed_clone(), + account_cache: self.account_cache.clone(), + local_cache: Vec::new(), + account_bloom: self.account_bloom.clone(), + parent_hash: None, + commit_hash: None, + commit_number: None, + } + } + + /// Clone the database for a canonical state. + pub fn boxed_clone_canon(&self, parent: &H256) -> StateDB { + StateDB { + db: self.db.boxed_clone(), + account_cache: self.account_cache.clone(), + local_cache: Vec::new(), + account_bloom: self.account_bloom.clone(), + parent_hash: Some(parent.clone()), + commit_hash: None, + commit_number: None, + } + } + + /// Check if pruning is enabled on the database. + pub fn is_pruned(&self) -> bool { + self.db.is_pruned() + } + + /// Heap size used. + pub fn mem_used(&self) -> usize { + self.db.mem_used() //TODO: + self.account_cache.lock().heap_size_of_children() + } + + /// Returns underlying `JournalDB`. + pub fn journal_db(&self) -> &JournalDB { + &*self.db + } + + /// Add a local cache entry. + /// The entry will be propagated to the global cache in `sync_cache`. + /// `modified` indicates that the entry was changed since being read from disk or global cache. 
+ /// `data` can be set to an existing (`Some`), or non-existing account (`None`). + pub fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool) { + self.local_cache.push(CacheQueueItem { + address: addr, + account: data, + modified: modified, + }) + } + + /// Get basic copy of the cached account. Does not include storage. + /// Returns 'None' if cache is disabled or if the account is not cached. + pub fn get_cached_account(&self, addr: &Address) -> Option> { + let mut cache = self.account_cache.lock(); + if !Self::is_allowed(addr, &self.parent_hash, &cache.modifications) { + return None; + } + cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic())) + } + + /// Get value from a cached account. + /// Returns 'None' if cache is disabled or if the account is not cached. + pub fn get_cached(&self, a: &Address, f: F) -> Option + where F: FnOnce(Option<&mut Account>) -> U { + let mut cache = self.account_cache.lock(); + if !Self::is_allowed(a, &self.parent_hash, &cache.modifications) { + return None; + } + cache.accounts.get_mut(a).map(|c| f(c.as_mut())) + } + + /// Check if the account can be returned from cache by matching current block parent hash against canonical + /// state and filtering out account modified in later blocks. + fn is_allowed(addr: &Address, parent_hash: &Option, modifications: &VecDeque) -> bool { + let mut parent = match *parent_hash { + None => { + trace!("Cache lookup skipped for {:?}: no parent hash", addr); + return false; + } + Some(ref parent) => parent, + }; + if modifications.is_empty() { + return true; + } + // Ignore all accounts modified in later blocks + // Modifications contains block ordered by the number + // We search for our parent in that list first and then for + // all its parent until we hit the canonical block, + // checking against all the intermediate modifications. 
+ let mut iter = modifications.iter(); + while let Some(ref m) = iter.next() { + if &m.hash == parent { + if m.is_canon { + return true; + } + parent = &m.parent; + } + if m.accounts.contains(addr) { + trace!("Cache lookup skipped for {:?}: modified in a later block", addr); + return false; + } + } + trace!("Cache lookup skipped for {:?}: parent hash is unknown", addr); + return false; + } +} + +#[cfg(test)] +mod tests { + +use util::{U256, H256, FixedHash, Address, DBTransaction}; +use tests::helpers::*; +use state::Account; +use util::log::init_log; + +#[test] +fn state_db_smoke() { + init_log(); + + let mut state_db_result = get_temp_state_db(); + let state_db = state_db_result.take(); + let root_parent = H256::random(); + let address = Address::random(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + let mut batch = DBTransaction::new(state_db.journal_db().backing()); + + // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] + // balance [ 5 5 4 3 2 2 ] + let mut s = state_db.boxed_clone_canon(&root_parent); + s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false); + s.commit(&mut batch, 0, &h0, None).unwrap(); + s.sync_cache(&[], &[], true); + + let mut s = state_db.boxed_clone_canon(&h0); + s.commit(&mut batch, 1, &h1a, None).unwrap(); + s.sync_cache(&[], &[], true); + + let mut s = state_db.boxed_clone_canon(&h0); + s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true); + s.commit(&mut batch, 1, &h1b, None).unwrap(); + s.sync_cache(&[], &[], false); + + let mut s = state_db.boxed_clone_canon(&h1b); + s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true); + s.commit(&mut batch, 2, &h2b, None).unwrap(); + s.sync_cache(&[], &[], false); + + let mut s = state_db.boxed_clone_canon(&h1a); + s.add_to_account_cache(address, 
Some(Account::new_basic(5.into(), 0.into())), true); + s.commit(&mut batch, 2, &h2a, None).unwrap(); + s.sync_cache(&[], &[], true); + + let mut s = state_db.boxed_clone_canon(&h2a); + s.commit(&mut batch, 3, &h3a, None).unwrap(); + s.sync_cache(&[], &[], true); + + let s = state_db.boxed_clone_canon(&h3a); + assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5)); + + let s = state_db.boxed_clone_canon(&h1a); + assert!(s.get_cached_account(&address).is_none()); + + let s = state_db.boxed_clone_canon(&h2b); + assert!(s.get_cached_account(&address).is_none()); + + let s = state_db.boxed_clone_canon(&h1b); + assert!(s.get_cached_account(&address).is_none()); + + // reorg to 3b + // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] + let mut s = state_db.boxed_clone_canon(&h2b); + s.commit(&mut batch, 3, &h3b, None).unwrap(); + s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true); + let s = state_db.boxed_clone_canon(&h3a); + assert!(s.get_cached_account(&address).is_none()); +} +} + diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index dc95e8267..067f28d39 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -57,7 +57,11 @@ fn should_return_registrar() { IoChannel::disconnected(), &db_config ).unwrap(); - assert_eq!(client.additional_params().get("registrar"), Some(&"8e4e9b13d4b45cb0befc93c3061b1408f67316b2".to_owned())); + let params = client.additional_params(); + let address = params.get("registrar").unwrap(); + + assert_eq!(address.len(), 40); + assert!(U256::from_str(address).is_ok()); } #[test] diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index c1f99f434..459d58b83 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -19,15 +19,17 @@ use io::*; use client::{BlockChainClient, Client, ClientConfig}; use common::*; use spec::*; +use state_db::StateDB; use block::{OpenBlock, Drain}; use 
blockchain::{BlockChain, Config as BlockChainConfig}; -use state::*; +use state::State; use evm::Schedule; use engines::Engine; use ethereum; use devtools::*; use miner::Miner; use rlp::{self, RlpStream, Stream}; +use db::COL_STATE; #[cfg(feature = "json-tests")] pub enum ChainEra { @@ -146,9 +148,9 @@ pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_numbe ).unwrap(); let test_engine = &*test_spec.engine; - let mut db_result = get_temp_journal_db(); + let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - test_spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); + test_spec.ensure_db_good(&mut db).unwrap(); let genesis_header = test_spec.genesis_header(); let mut rolling_timestamp = 40; @@ -321,9 +323,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { } } -pub fn get_temp_journal_db() -> GuardedTempResult> { +pub fn get_temp_state_db() -> GuardedTempResult { let temp = RandomTempPath::new(); - let journal_db = get_temp_journal_db_in(temp.as_path()); + let journal_db = get_temp_state_db_in(temp.as_path()); GuardedTempResult { _temp: temp, @@ -331,9 +333,10 @@ pub fn get_temp_journal_db() -> GuardedTempResult> { } } +#[cfg(test)] pub fn get_temp_state() -> GuardedTempResult { let temp = RandomTempPath::new(); - let journal_db = get_temp_journal_db_in(temp.as_path()); + let journal_db = get_temp_state_db_in(temp.as_path()); GuardedTempResult { _temp: temp, @@ -341,13 +344,15 @@ pub fn get_temp_state() -> GuardedTempResult { } } -pub fn get_temp_journal_db_in(path: &Path) -> Box { +pub fn get_temp_state_db_in(path: &Path) -> StateDB { let db = new_db(path.to_str().expect("Only valid utf8 paths for tests.")); - journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None) + let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, COL_STATE); + StateDB::new(journal_db) } +#[cfg(test)] pub fn get_temp_state_in(path: &Path) -> State { - let journal_db = get_temp_journal_db_in(path); + let 
journal_db = get_temp_state_db_in(path); State::new(journal_db, U256::from(0), Default::default()) } diff --git a/ethcore/src/tests/mod.rs b/ethcore/src/tests/mod.rs index 73c5777a4..86266c1d0 100644 --- a/ethcore/src/tests/mod.rs +++ b/ethcore/src/tests/mod.rs @@ -17,5 +17,5 @@ pub mod helpers; #[cfg(test)] mod client; -#[cfg(test)] +#[cfg(feature="ipc")] mod rpc; diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index d5d88c087..b021e750d 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -19,7 +19,8 @@ use nanoipc; use std::sync::Arc; use std::sync::atomic::{Ordering, AtomicBool}; -use client::{Client, BlockChainClient, ClientConfig, RemoteClient, BlockID}; +use client::{Client, BlockChainClient, ClientConfig, BlockID}; +use client::remote::RemoteClient; use tests::helpers::*; use devtools::*; use miner::Miner; diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index b608ad685..2cf14828a 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -256,16 +256,6 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { return; } - // at first, let's insert new block traces - { - let mut traces = self.traces.write(); - // it's important to use overwrite here, - // cause this value might be queried by hash later - batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); - // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection - self.note_used(CacheID::Trace(request.block_hash.clone())); - } - // now let's rebuild the blooms if !request.enacted.is_empty() { let range_start = request.block_number as Number + 1 - request.enacted.len(); @@ -276,12 +266,25 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { // all traces are expected to be found here. That's why `expect` has been used // instead of `filter_map`. 
If some traces haven't been found, it meens that // traces database is corrupted or incomplete. - .map(|block_hash| self.traces(block_hash).expect("Traces database is incomplete.")) - .map(|block_traces| block_traces.bloom()) + .map(|block_hash| if block_hash == &request.block_hash { + request.traces.bloom() + } else { + self.traces(block_hash).expect("Traces database is incomplete.").bloom() + }) .map(blooms::Bloom::from) .map(Into::into) .collect(); + // insert new block traces into the cache and the database + { + let mut traces = self.traces.write(); + // it's important to use overwrite here, + // cause this value might be queried by hash later + batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite); + // note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection + self.note_used(CacheID::Trace(request.block_hash.clone())); + } + let chain = BloomGroupChain::new(self.bloom_config, self); let trace_blooms = chain.replace(&replaced_range, enacted_blooms); let blooms_to_insert = trace_blooms.into_iter() diff --git a/ethcore/src/trace/executive_tracer.rs b/ethcore/src/trace/executive_tracer.rs index b086b2378..ca9bc30b5 100644 --- a/ethcore/src/trace/executive_tracer.rs +++ b/ethcore/src/trace/executive_tracer.rs @@ -31,7 +31,7 @@ fn top_level_subtraces(traces: &[FlatTrace]) -> usize { traces.iter().filter(|t| t.trace_address.is_empty()).count() } -fn update_trace_address(traces: Vec) -> Vec { +fn prefix_subtrace_addresses(mut traces: Vec) -> Vec { // input traces are expected to be ordered like // [] // [0] @@ -48,26 +48,38 @@ fn update_trace_address(traces: Vec) -> Vec { // [0, 0, 1] // [1] // [1, 0] - let mut top_subtrace_index = 0; - let mut subtrace_subtraces_left = 0; - traces.into_iter().map(|mut trace| { - let is_top_subtrace = trace.trace_address.is_empty(); - let is_subtrace = trace.trace_address.len() == 1; - 
trace.trace_address.push_front(top_subtrace_index); - - if is_top_subtrace { - subtrace_subtraces_left = trace.subtraces; - } else if is_subtrace { - subtrace_subtraces_left -= 1; - } - - if subtrace_subtraces_left == 0 { - top_subtrace_index += 1; - } - trace - }).collect() + let mut current_subtrace_index = 0; + let mut first = true; + for trace in traces.iter_mut() { + match (first, trace.trace_address.is_empty()) { + (true, _) => first = false, + (_, true) => current_subtrace_index += 1, + _ => {} + } + trace.trace_address.push_front(current_subtrace_index); + } + traces } +#[test] +fn should_prefix_address_properly() { + use super::trace::{Action, Res, Suicide}; + + let f = |v: Vec| FlatTrace { + action: Action::Suicide(Suicide { + address: Default::default(), + balance: Default::default(), + refund_address: Default::default(), + }), + result: Res::None, + subtraces: 0, + trace_address: v.into_iter().collect(), + }; + let t = vec![vec![], vec![0], vec![0, 0], vec![0], vec![], vec![], vec![0], vec![]].into_iter().map(&f).collect(); + let t = prefix_subtrace_addresses(t); + assert_eq!(t, vec![vec![0], vec![0, 0], vec![0, 0, 0], vec![0, 0], vec![1], vec![2], vec![2, 0], vec![3]].into_iter().map(&f).collect::>()); +} + impl Tracer for ExecutiveTracer { fn prepare_trace_call(&self, params: &ActionParams) -> Option { Some(Call::from(params.clone())) @@ -93,7 +105,7 @@ impl Tracer for ExecutiveTracer { }; debug!(target: "trace", "Traced call {:?}", trace); self.traces.push(trace); - self.traces.extend(update_trace_address(subs)); + self.traces.extend(prefix_subtrace_addresses(subs)); } fn trace_create(&mut self, create: Option, gas_used: U256, code: Option, address: Address, subs: Vec) { @@ -109,7 +121,7 @@ impl Tracer for ExecutiveTracer { }; debug!(target: "trace", "Traced create {:?}", trace); self.traces.push(trace); - self.traces.extend(update_trace_address(subs)); + self.traces.extend(prefix_subtrace_addresses(subs)); } fn trace_failed_call(&mut self, call: 
Option, subs: Vec, error: TraceError) { @@ -121,7 +133,7 @@ impl Tracer for ExecutiveTracer { }; debug!(target: "trace", "Traced failed call {:?}", trace); self.traces.push(trace); - self.traces.extend(update_trace_address(subs)); + self.traces.extend(prefix_subtrace_addresses(subs)); } fn trace_failed_create(&mut self, create: Option, subs: Vec, error: TraceError) { @@ -133,7 +145,7 @@ impl Tracer for ExecutiveTracer { }; debug!(target: "trace", "Traced failed create {:?}", trace); self.traces.push(trace); - self.traces.extend(update_trace_address(subs)); + self.traces.extend(prefix_subtrace_addresses(subs)); } fn trace_suicide(&mut self, address: Address, balance: U256, refund_address: Address) { diff --git a/ethcore/src/types/block_status.rs b/ethcore/src/types/block_status.rs index bf8218e47..857daae10 100644 --- a/ethcore/src/types/block_status.rs +++ b/ethcore/src/types/block_status.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . //! Block status description module +use verification::queue::Status as QueueStatus; /// General block status #[derive(Debug, Eq, PartialEq, Binary)] @@ -28,3 +29,13 @@ pub enum BlockStatus { /// Unknown. Unknown, } + +impl From for BlockStatus { + fn from(status: QueueStatus) -> Self { + match status { + QueueStatus::Queued => BlockStatus::Queued, + QueueStatus::Bad => BlockStatus::Bad, + QueueStatus::Unknown => BlockStatus::Unknown, + } + } +} \ No newline at end of file diff --git a/ethcore/src/types/filter.rs b/ethcore/src/types/filter.rs index 6274d63f4..e3487e5f6 100644 --- a/ethcore/src/types/filter.rs +++ b/ethcore/src/types/filter.rs @@ -22,7 +22,7 @@ use client::BlockID; use log_entry::LogEntry; /// Blockchain Filter. -#[derive(Binary)] +#[derive(Binary, Debug, PartialEq)] pub struct Filter { /// Blockchain will be searched from this block. 
pub from_block: BlockID, diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index 0537fe056..32c7faabe 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -25,7 +25,7 @@ pub mod executed; pub mod block_status; pub mod account_diff; pub mod state_diff; -pub mod block_queue_info; +pub mod verification_queue_info; pub mod filter; pub mod trace_filter; pub mod call_analytics; diff --git a/ethcore/src/types/trace_types/trace.rs b/ethcore/src/types/trace_types/trace.rs index 9efeaa001..2571805a6 100644 --- a/ethcore/src/types/trace_types/trace.rs +++ b/ethcore/src/types/trace_types/trace.rs @@ -181,7 +181,7 @@ impl From for Create { from: p.sender, value: p.value.value(), gas: p.gas, - init: p.code.unwrap_or_else(Vec::new), + init: p.code.map_or_else(Vec::new, |c| (*c).clone()), } } } diff --git a/ethcore/src/types/transaction.rs b/ethcore/src/types/transaction.rs index 386b85f7e..f32a2f4dd 100644 --- a/ethcore/src/types/transaction.rs +++ b/ethcore/src/types/transaction.rs @@ -20,7 +20,7 @@ use std::ops::Deref; use std::cell::*; use rlp::*; use util::sha3::Hashable; -use util::{H256, Address, U256, Bytes}; +use util::{H256, Address, U256, Bytes, HeapSizeOf}; use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError}; use error::*; use evm::Schedule; @@ -86,6 +86,12 @@ impl Transaction { } } +impl HeapSizeOf for Transaction { + fn heap_size_of_children(&self) -> usize { + self.data.heap_size_of_children() + } +} + impl From for SignedTransaction { fn from(t: ethjson::state::Transaction) -> Self { let to: Option = t.to.into(); @@ -251,6 +257,12 @@ impl Encodable for SignedTransaction { fn rlp_append(&self, s: &mut RlpStream) { self.rlp_append_sealed_transaction(s) } } +impl HeapSizeOf for SignedTransaction { + fn heap_size_of_children(&self) -> usize { + self.unsigned.heap_size_of_children() + } +} + impl SignedTransaction { /// Append object with a signature into RLP stream pub fn 
rlp_append_sealed_transaction(&self, s: &mut RlpStream) { diff --git a/ethcore/src/types/verification_queue_info.rs b/ethcore/src/types/verification_queue_info.rs new file mode 100644 index 000000000..35954e7a9 --- /dev/null +++ b/ethcore/src/types/verification_queue_info.rs @@ -0,0 +1,53 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Verification queue info types + +/// Verification queue status +#[derive(Debug, Binary)] +pub struct VerificationQueueInfo { + /// Number of queued items pending verification + pub unverified_queue_size: usize, + /// Number of verified queued items pending import + pub verified_queue_size: usize, + /// Number of items being verified + pub verifying_queue_size: usize, + /// Configured maximum number of items in the queue + pub max_queue_size: usize, + /// Configured maximum number of bytes to use + pub max_mem_use: usize, + /// Heap memory used in bytes + pub mem_used: usize, +} + +impl VerificationQueueInfo { + /// The total size of the queues. + pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size } + + /// The size of the unverified and verifying queues. 
+ pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size } + + /// Indicates that queue is full + pub fn is_full(&self) -> bool { + self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size || + self.mem_used > self.max_mem_use + } + + /// Indicates that queue is empty + pub fn is_empty(&self) -> bool { + self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0 + } +} \ No newline at end of file diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index ed9c8ebc7..ed9e0ec4c 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -16,6 +16,7 @@ pub mod verification; pub mod verifier; +pub mod queue; mod canon_verifier; mod noop_verifier; @@ -23,6 +24,7 @@ pub use self::verification::*; pub use self::verifier::Verifier; pub use self::canon_verifier::CanonVerifier; pub use self::noop_verifier::NoopVerifier; +pub use self::queue::{BlockQueue, Config as QueueConfig, VerificationQueue, QueueInfo}; /// Verifier type. #[derive(Debug, PartialEq, Clone)] diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs new file mode 100644 index 000000000..b6b6c5cf6 --- /dev/null +++ b/ethcore/src/verification/queue/kind.rs @@ -0,0 +1,183 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Definition of valid items for the verification queue. + +use engines::Engine; +use error::Error; + +use util::{HeapSizeOf, H256}; + +pub use self::blocks::Blocks; +pub use self::headers::Headers; + +/// Something which can produce a hash and a parent hash. +pub trait HasHash { + /// Get the hash of this item. + fn hash(&self) -> H256; + + /// Get the hash of this item's parent. + fn parent_hash(&self) -> H256; +} + +/// Defines transitions between stages of verification. +/// +/// It starts with a fallible transformation from an "input" into the unverified item. +/// This consists of quick, simply done checks as well as extracting particular data. +/// +/// Then, there is a `verify` function which performs more expensive checks and +/// produces the verified output. +/// +/// For correctness, the hashes produced by each stage of the pipeline should be +/// consistent. +pub trait Kind: 'static + Sized + Send + Sync { + /// The first stage: completely unverified. + type Input: Sized + Send + HasHash + HeapSizeOf; + + /// The second stage: partially verified. + type Unverified: Sized + Send + HasHash + HeapSizeOf; + + /// The third stage: completely verified. + type Verified: Sized + Send + HasHash + HeapSizeOf; + + /// Attempt to create the `Unverified` item from the input. + fn create(input: Self::Input, engine: &Engine) -> Result; + + /// Attempt to verify the `Unverified` item using the given engine. + fn verify(unverified: Self::Unverified, engine: &Engine) -> Result; +} + +/// The blocks verification module. +pub mod blocks { + use super::{Kind, HasHash}; + + use engines::Engine; + use error::Error; + use header::Header; + use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered}; + + use util::{Bytes, HeapSizeOf, H256}; + + /// A mode for verifying blocks. 
+ pub struct Blocks; + + impl Kind for Blocks { + type Input = Unverified; + type Unverified = Unverified; + type Verified = PreverifiedBlock; + + fn create(input: Self::Input, engine: &Engine) -> Result { + match verify_block_basic(&input.header, &input.bytes, engine) { + Ok(()) => Ok(input), + Err(e) => { + warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e); + Err(e) + } + } + } + + fn verify(un: Self::Unverified, engine: &Engine) -> Result { + let hash = un.hash(); + match verify_block_unordered(un.header, un.bytes, engine) { + Ok(verified) => Ok(verified), + Err(e) => { + warn!(target: "client", "Stage 2 block verification failed for {}: {:?}", hash, e); + Err(e) + } + } + } + } + + /// An unverified block. + pub struct Unverified { + header: Header, + bytes: Bytes, + } + + impl Unverified { + /// Create an `Unverified` from raw bytes. + pub fn new(bytes: Bytes) -> Self { + use views::BlockView; + + let header = BlockView::new(&bytes).header(); + Unverified { + header: header, + bytes: bytes, + } + } + } + + impl HeapSizeOf for Unverified { + fn heap_size_of_children(&self) -> usize { + self.header.heap_size_of_children() + self.bytes.heap_size_of_children() + } + } + + impl HasHash for Unverified { + fn hash(&self) -> H256 { + self.header.hash() + } + + fn parent_hash(&self) -> H256 { + self.header.parent_hash().clone() + } + } + + impl HasHash for PreverifiedBlock { + fn hash(&self) -> H256 { + self.header.hash() + } + + fn parent_hash(&self) -> H256 { + self.header.parent_hash().clone() + } + } +} + +/// Verification for headers. +pub mod headers { + use super::{Kind, HasHash}; + + use engines::Engine; + use error::Error; + use header::Header; + use verification::verify_header_params; + + use util::hash::H256; + + impl HasHash for Header { + fn hash(&self) -> H256 { self.hash() } + fn parent_hash(&self) -> H256 { self.parent_hash().clone() } + } + + /// A mode for verifying headers. 
+ #[allow(dead_code)] + pub struct Headers; + + impl Kind for Headers { + type Input = Header; + type Unverified = Header; + type Verified = Header; + + fn create(input: Self::Input, engine: &Engine) -> Result { + verify_header_params(&input, engine).map(|_| input) + } + + fn verify(unverified: Self::Unverified, engine: &Engine) -> Result { + engine.verify_block_unordered(&unverified, None).map(|_| unverified) + } + } +} diff --git a/ethcore/src/block_queue.rs b/ethcore/src/verification/queue/mod.rs similarity index 65% rename from ethcore/src/block_queue.rs rename to ethcore/src/verification/queue/mod.rs index c441136fd..3f81d53ce 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/verification/queue/mod.rs @@ -16,30 +16,35 @@ //! A queue of blocks. Sits between network or other I/O and the `BlockChain`. //! Sorts them ready for blockchain insertion. + use std::thread::{JoinHandle, self}; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use std::sync::{Condvar as SCondvar, Mutex as SMutex}; use util::*; use io::*; -use verification::*; use error::*; use engines::Engine; -use views::*; -use header::*; use service::*; -use client::BlockStatus; -pub use types::block_queue_info::BlockQueueInfo; +use self::kind::{HasHash, Kind}; -known_heap_size!(0, UnverifiedBlock, VerifyingBlock, PreverifiedBlock); +pub use types::verification_queue_info::VerificationQueueInfo as QueueInfo; + +pub mod kind; const MIN_MEM_LIMIT: usize = 16384; const MIN_QUEUE_LIMIT: usize = 512; -/// Block queue configuration +/// Type alias for block queue convenience. +pub type BlockQueue = VerificationQueue; + +/// Type alias for header queue convenience. +pub type HeaderQueue = VerificationQueue; + +/// Verification queue configuration #[derive(Debug, PartialEq, Clone)] -pub struct BlockQueueConfig { - /// Maximum number of blocks to keep in unverified queue. +pub struct Config { + /// Maximum number of items to keep in unverified queue. 
/// When the limit is reached, is_full returns true. pub max_queue_size: usize, /// Maximum heap memory to use. @@ -47,42 +52,44 @@ pub struct BlockQueueConfig { pub max_mem_use: usize, } -impl Default for BlockQueueConfig { +impl Default for Config { fn default() -> Self { - BlockQueueConfig { + Config { max_queue_size: 30000, max_mem_use: 50 * 1024 * 1024, } } } +/// An item which is in the process of being verified. +pub struct Verifying { + hash: H256, + output: Option, +} -impl BlockQueueInfo { - /// The total size of the queues. - pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size } - - /// The size of the unverified and verifying queues. - pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size } - - /// Indicates that queue is full - pub fn is_full(&self) -> bool { - self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size || - self.mem_used > self.max_mem_use - } - - /// Indicates that queue is empty - pub fn is_empty(&self) -> bool { - self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size == 0 +impl HeapSizeOf for Verifying { + fn heap_size_of_children(&self) -> usize { + self.output.heap_size_of_children() } } -/// A queue of blocks. Sits between network or other I/O and the `BlockChain`. -/// Sorts them ready for blockchain insertion. -pub struct BlockQueue { +/// Status of items in the queue. +pub enum Status { + /// Currently queued. + Queued, + /// Known to be bad. + Bad, + /// Unknown. + Unknown, +} + +/// A queue of items to be verified. Sits between network or other I/O and the `BlockChain`. +/// Keeps them in the same order as inserted, minus invalid items. 
+pub struct VerificationQueue { panic_handler: Arc, engine: Arc, more_to_verify: Arc, - verification: Arc, + verification: Arc>, verifiers: Vec>, deleting: Arc, ready_signal: Arc, @@ -92,16 +99,6 @@ pub struct BlockQueue { max_mem_use: usize, } -struct UnverifiedBlock { - header: Header, - bytes: Bytes, -} - -struct VerifyingBlock { - hash: H256, - block: Option, -} - struct QueueSignal { deleting: Arc, signalled: AtomicBool, @@ -128,19 +125,19 @@ impl QueueSignal { } } -struct Verification { +struct Verification { // All locks must be captured in the order declared here. - unverified: Mutex>, - verified: Mutex>, - verifying: Mutex>, + unverified: Mutex>, + verified: Mutex>, + verifying: Mutex>>, bad: Mutex>, more_to_verify: SMutex<()>, empty: SMutex<()>, } -impl BlockQueue { +impl VerificationQueue { /// Creates a new queue instance. - pub fn new(config: BlockQueueConfig, engine: Arc, message_channel: IoChannel) -> BlockQueue { + pub fn new(config: Config, engine: Arc, message_channel: IoChannel) -> Self { let verification = Arc::new(Verification { unverified: Mutex::new(VecDeque::new()), verified: Mutex::new(VecDeque::new()), @@ -175,13 +172,13 @@ impl BlockQueue { .name(format!("Verifier #{}", i)) .spawn(move || { panic_handler.catch_panic(move || { - BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty) + VerificationQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty) }).unwrap() }) .expect("Error starting block verification thread") ); } - BlockQueue { + VerificationQueue { engine: engine, panic_handler: panic_handler, ready_signal: ready_signal.clone(), @@ -196,7 +193,7 @@ impl BlockQueue { } } - fn verify(verification: Arc, engine: Arc, wait: Arc, ready: Arc, deleting: Arc, empty: Arc) { + fn verify(verification: Arc>, engine: Arc, wait: Arc, ready: Arc, deleting: Arc, empty: Arc) { while !deleting.load(AtomicOrdering::Acquire) { { let mut more_to_verify = 
verification.more_to_verify.lock().unwrap(); @@ -214,57 +211,66 @@ impl BlockQueue { } } - let block = { + let item = { + // acquire these locks before getting the item to verify. let mut unverified = verification.unverified.lock(); - if unverified.is_empty() { - continue; - } let mut verifying = verification.verifying.lock(); - let block = unverified.pop_front().unwrap(); - verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None }); - block + + let item = match unverified.pop_front() { + Some(item) => item, + None => continue, + }; + + verifying.push_back(Verifying { hash: item.hash(), output: None }); + item }; - let block_hash = block.header.hash(); - match verify_block_unordered(block.header, block.bytes, &*engine) { + let hash = item.hash(); + match K::verify(item, &*engine) { Ok(verified) => { let mut verifying = verification.verifying.lock(); - for e in verifying.iter_mut() { - if e.hash == block_hash { - e.block = Some(verified); + let mut idx = None; + for (i, e) in verifying.iter_mut().enumerate() { + if e.hash == hash { + idx = Some(i); + e.output = Some(verified); break; } } - if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash { + + if idx == Some(0) { // we're next! 
let mut verified = verification.verified.lock(); let mut bad = verification.bad.lock(); - BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); + VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); ready.set(); } }, - Err(err) => { + Err(_) => { let mut verifying = verification.verifying.lock(); let mut verified = verification.verified.lock(); let mut bad = verification.bad.lock(); - warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); - bad.insert(block_hash.clone()); - verifying.retain(|e| e.hash != block_hash); - BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); - ready.set(); + + bad.insert(hash.clone()); + verifying.retain(|e| e.hash != hash); + + if verifying.front().map_or(false, |x| x.output.is_some()) { + VerificationQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); + ready.set(); + } } } } } - fn drain_verifying(verifying: &mut VecDeque, verified: &mut VecDeque, bad: &mut HashSet) { - while !verifying.is_empty() && verifying.front().unwrap().block.is_some() { - let block = verifying.pop_front().unwrap().block.unwrap(); - if bad.contains(block.header.parent_hash()) { - bad.insert(block.header.hash()); - } - else { - verified.push_back(block); + fn drain_verifying(verifying: &mut VecDeque>, verified: &mut VecDeque, bad: &mut HashSet) { + while let Some(output) = verifying.front_mut().and_then(|x| x.output.take()) { + assert!(verifying.pop_front().is_some()); + + if bad.contains(&output.parent_hash()) { + bad.insert(output.hash()); + } else { + verified.push_back(output); } } } @@ -288,21 +294,20 @@ impl BlockQueue { } } - /// Check if the block is currently in the queue - pub fn block_status(&self, hash: &H256) -> BlockStatus { + /// Check if the item is currently in the queue + pub fn status(&self, hash: &H256) -> Status { if self.processing.read().contains(hash) { - return BlockStatus::Queued; + return Status::Queued; } if 
self.verification.bad.lock().contains(hash) { - return BlockStatus::Bad; + return Status::Bad; } - BlockStatus::Unknown + Status::Unknown } /// Add a block to the queue. - pub fn import_block(&self, bytes: Bytes) -> ImportResult { - let header = BlockView::new(&bytes).header(); - let h = header.hash(); + pub fn import(&self, input: K::Input) -> ImportResult { + let h = input.hash(); { if self.processing.read().contains(&h) { return Err(ImportError::AlreadyQueued.into()); @@ -313,74 +318,71 @@ impl BlockQueue { return Err(ImportError::KnownBad.into()); } - if bad.contains(header.parent_hash()) { + if bad.contains(&input.parent_hash()) { bad.insert(h.clone()); return Err(ImportError::KnownBad.into()); } } - match verify_block_basic(&header, &bytes, &*self.engine) { - Ok(()) => { + match K::create(input, &*self.engine) { + Ok(item) => { self.processing.write().insert(h.clone()); - self.verification.unverified.lock().push_back(UnverifiedBlock { header: header, bytes: bytes }); + self.verification.unverified.lock().push_back(item); self.more_to_verify.notify_all(); Ok(h) }, Err(err) => { - warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); self.verification.bad.lock().insert(h.clone()); Err(err) } } } - /// Mark given block and all its children as bad. Stops verification. - pub fn mark_as_bad(&self, block_hashes: &[H256]) { - if block_hashes.is_empty() { + /// Mark given item and all its children as bad. pauses verification + /// until complete. 
+ pub fn mark_as_bad(&self, hashes: &[H256]) { + if hashes.is_empty() { return; } let mut verified_lock = self.verification.verified.lock(); let mut verified = &mut *verified_lock; let mut bad = self.verification.bad.lock(); let mut processing = self.processing.write(); - bad.reserve(block_hashes.len()); - for hash in block_hashes { + bad.reserve(hashes.len()); + for hash in hashes { bad.insert(hash.clone()); processing.remove(hash); } let mut new_verified = VecDeque::new(); - for block in verified.drain(..) { - if bad.contains(block.header.parent_hash()) { - bad.insert(block.header.hash()); - processing.remove(&block.header.hash()); + for output in verified.drain(..) { + if bad.contains(&output.parent_hash()) { + bad.insert(output.hash()); + processing.remove(&output.hash()); } else { - new_verified.push_back(block); + new_verified.push_back(output); } } *verified = new_verified; } - /// Mark given block as processed - pub fn mark_as_good(&self, block_hashes: &[H256]) { - if block_hashes.is_empty() { + /// Mark given item as processed + pub fn mark_as_good(&self, hashes: &[H256]) { + if hashes.is_empty() { return; } let mut processing = self.processing.write(); - for hash in block_hashes { + for hash in hashes { processing.remove(hash); } } - /// Removes up to `max` verified blocks from the queue - pub fn drain(&self, max: usize) -> Vec { + /// Removes up to `max` verified items from the queue + pub fn drain(&self, max: usize) -> Vec { let mut verified = self.verification.verified.lock(); let count = min(max, verified.len()); - let mut result = Vec::with_capacity(count); - for _ in 0..count { - let block = verified.pop_front().unwrap(); - result.push(block); - } + let result = verified.drain(..count).collect::>(); + self.ready_signal.reset(); if !verified.is_empty() { self.ready_signal.set(); @@ -389,7 +391,7 @@ impl BlockQueue { } /// Get queue status. 
- pub fn queue_info(&self) -> BlockQueueInfo { + pub fn queue_info(&self) -> QueueInfo { let (unverified_len, unverified_bytes) = { let v = self.verification.unverified.lock(); (v.len(), v.heap_size_of_children()) @@ -402,7 +404,8 @@ impl BlockQueue { let v = self.verification.verified.lock(); (v.len(), v.heap_size_of_children()) }; - BlockQueueInfo { + + QueueInfo { unverified_queue_size: unverified_len, verifying_queue_size: verifying_len, verified_queue_size: verified_len, @@ -428,22 +431,22 @@ impl BlockQueue { } } -impl MayPanic for BlockQueue { +impl MayPanic for VerificationQueue { fn on_panic(&self, closure: F) where F: OnPanicListener { self.panic_handler.on_panic(closure); } } -impl Drop for BlockQueue { +impl Drop for VerificationQueue { fn drop(&mut self) { - trace!(target: "shutdown", "[BlockQueue] Closing..."); + trace!(target: "shutdown", "[VerificationQueue] Closing..."); self.clear(); self.deleting.store(true, AtomicOrdering::Release); self.more_to_verify.notify_all(); for t in self.verifiers.drain(..) 
{ t.join().unwrap(); } - trace!(target: "shutdown", "[BlockQueue] Closed."); + trace!(target: "shutdown", "[VerificationQueue] Closed."); } } @@ -452,7 +455,8 @@ mod tests { use util::*; use io::*; use spec::*; - use block_queue::*; + use super::{BlockQueue, Config}; + use super::kind::blocks::Unverified; use tests::helpers::*; use error::*; use views::*; @@ -460,7 +464,7 @@ mod tests { fn get_test_queue() -> BlockQueue { let spec = get_test_spec(); let engine = spec.engine; - BlockQueue::new(BlockQueueConfig::default(), engine, IoChannel::disconnected()) + BlockQueue::new(Config::default(), engine, IoChannel::disconnected()) } #[test] @@ -468,13 +472,13 @@ mod tests { // TODO better test let spec = Spec::new_test(); let engine = spec.engine; - let _ = BlockQueue::new(BlockQueueConfig::default(), engine, IoChannel::disconnected()); + let _ = BlockQueue::new(Config::default(), engine, IoChannel::disconnected()); } #[test] fn can_import_blocks() { let queue = get_test_queue(); - if let Err(e) = queue.import_block(get_good_dummy_block()) { + if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) { panic!("error importing block that is valid by definition({:?})", e); } } @@ -482,11 +486,11 @@ mod tests { #[test] fn returns_error_for_duplicates() { let queue = get_test_queue(); - if let Err(e) = queue.import_block(get_good_dummy_block()) { + if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) { panic!("error importing block that is valid by definition({:?})", e); } - let duplicate_import = queue.import_block(get_good_dummy_block()); + let duplicate_import = queue.import(Unverified::new(get_good_dummy_block())); match duplicate_import { Err(e) => { match e { @@ -503,14 +507,14 @@ mod tests { let queue = get_test_queue(); let block = get_good_dummy_block(); let hash = BlockView::new(&block).header().hash().clone(); - if let Err(e) = queue.import_block(block) { + if let Err(e) = queue.import(Unverified::new(block)) { panic!("error importing 
block that is valid by definition({:?})", e); } queue.flush(); queue.drain(10); queue.mark_as_good(&[ hash ]); - if let Err(e) = queue.import_block(get_good_dummy_block()) { + if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) { panic!("error importing block that has already been drained ({:?})", e); } } @@ -518,7 +522,8 @@ mod tests { #[test] fn returns_empty_once_finished() { let queue = get_test_queue(); - queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition"); + queue.import(Unverified::new(get_good_dummy_block())) + .expect("error importing block that is valid by definition"); queue.flush(); queue.drain(1); @@ -529,13 +534,13 @@ mod tests { fn test_mem_limit() { let spec = get_test_spec(); let engine = spec.engine; - let mut config = BlockQueueConfig::default(); + let mut config = Config::default(); config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 let queue = BlockQueue::new(config, engine, IoChannel::disconnected()); assert!(!queue.queue_info().is_full()); let mut blocks = get_good_dummy_block_seq(50); for b in blocks.drain(..) { - queue.import_block(b).unwrap(); + queue.import(Unverified::new(b)).unwrap(); } assert!(queue.queue_info().is_full()); } diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 4e1305a33..f89ac7d9a 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -36,14 +36,22 @@ pub struct PreverifiedBlock { pub bytes: Bytes, } +impl HeapSizeOf for PreverifiedBlock { + fn heap_size_of_children(&self) -> usize { + self.header.heap_size_of_children() + + self.transactions.heap_size_of_children() + + self.bytes.heap_size_of_children() + } +} + /// Phase 1 quick block verification. Only does checks that are cheap. 
Operates on a single block pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { - try!(verify_header(&header, engine)); + try!(verify_header_params(&header, engine)); try!(verify_block_integrity(bytes, &header.transactions_root(), &header.uncles_hash())); try!(engine.verify_block_basic(&header, Some(bytes))); for u in try!(UntrustedRlp::new(bytes).at(2)).iter().map(|rlp| rlp.as_val::
()) { let u = try!(u); - try!(verify_header(&u, engine)); + try!(verify_header_params(&u, engine)); try!(engine.verify_block_basic(&u, None)); } // Verify transactions. @@ -179,7 +187,7 @@ pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> } /// Check basic header parameters. -fn verify_header(header: &Header, engine: &Engine) -> Result<(), Error> { +pub fn verify_header_params(header: &Header, engine: &Engine) -> Result<(), Error> { if header.number() >= From::from(BlockNumber::max_value()) { return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() }))) } diff --git a/ethkey/src/signature.rs b/ethkey/src/signature.rs index eec0fbf47..e1afb3940 100644 --- a/ethkey/src/signature.rs +++ b/ethkey/src/signature.rs @@ -25,7 +25,6 @@ use bigint::hash::{H520, H256, FixedHash}; use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address}; #[repr(C)] -#[derive(Eq)] pub struct Signature([u8; 65]); impl Signature { @@ -76,6 +75,9 @@ impl PartialEq for Signature { } } +// manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`. +impl Eq for Signature { } + // also manual for the same reason, but the pretty printing might be useful. 
impl fmt::Debug for Signature { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { diff --git a/ethstore/src/account/safe_account.rs b/ethstore/src/account/safe_account.rs index 38069a718..315fd283b 100644 --- a/ethstore/src/account/safe_account.rs +++ b/ethstore/src/account/safe_account.rs @@ -16,14 +16,14 @@ use ethkey::{KeyPair, sign, Address, Secret, Signature, Message}; use {json, Error, crypto}; -use crypto::{Keccak256}; +use crypto::Keccak256; use random::Random; use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf}; #[derive(Debug, PartialEq, Clone)] pub struct Crypto { pub cipher: Cipher, - pub ciphertext: [u8; 32], + pub ciphertext: Vec, pub kdf: Kdf, pub mac: [u8; 32], } @@ -95,7 +95,7 @@ impl Crypto { cipher: Cipher::Aes128Ctr(Aes128Ctr { iv: iv, }), - ciphertext: ciphertext, + ciphertext: ciphertext.to_vec(), kdf: Kdf::Pbkdf2(Pbkdf2 { dklen: crypto::KEY_LENGTH as u32, salt: salt, @@ -107,6 +107,10 @@ impl Crypto { } pub fn secret(&self, password: &str) -> Result { + if self.ciphertext.len() > 32 { + return Err(Error::InvalidSecret); + } + let (derived_left_bits, derived_right_bits) = match self.kdf { Kdf::Pbkdf2(ref params) => crypto::derive_key_iterations(password, ¶ms.salt, params.c), Kdf::Scrypt(ref params) => crypto::derive_key_scrypt(password, ¶ms.salt, params.n, params.p, params.r), @@ -122,7 +126,8 @@ impl Crypto { match self.cipher { Cipher::Aes128Ctr(ref params) => { - crypto::aes::decrypt(&derived_left_bits, ¶ms.iv, &self.ciphertext, &mut *secret) + let from = 32 - self.ciphertext.len(); + crypto::aes::decrypt(&derived_left_bits, ¶ms.iv, &self.ciphertext, &mut (&mut *secret)[from..]) }, } diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs index 3016412eb..e4d3b91c6 100644 --- a/ethstore/src/dir/disk.rs +++ b/ethstore/src/dir/disk.rs @@ -76,15 +76,14 @@ impl DiskDirectory { .map(|entry| entry.path()) .collect::>(); - let files: Result, _> = paths.iter() - .map(fs::File::open) - .collect(); - - let files = 
try!(files); - - files.into_iter() - .map(json::KeyFile::load) - .zip(paths.into_iter()) + paths + .iter() + .map(|p| ( + fs::File::open(p) + .map_err(Error::from) + .and_then(|r| json::KeyFile::load(r).map_err(|e| Error::Custom(format!("{:?}", e)))), + p + )) .map(|(file, path)| match file { Ok(file) => Ok((path.clone(), SafeAccount::from_file( file, Some(path.file_name().and_then(|n| n.to_str()).expect("Keys have valid UTF8 names only.").to_owned()) diff --git a/ethstore/src/json/bytes.rs b/ethstore/src/json/bytes.rs new file mode 100644 index 000000000..fd4a3b995 --- /dev/null +++ b/ethstore/src/json/bytes.rs @@ -0,0 +1,58 @@ +use std::{ops, str}; +use serde::{Deserialize, Deserializer, Error, Serialize, Serializer}; +use rustc_serialize::hex::{ToHex, FromHex, FromHexError}; + +#[derive(Debug, PartialEq)] +pub struct Bytes(Vec); + +impl ops::Deref for Bytes { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Deserialize for Bytes { + fn deserialize(deserializer: &mut D) -> Result + where D: Deserializer + { + let s = try!(String::deserialize(deserializer)); + let data = try!(s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e)))); + Ok(Bytes(data)) + } +} + +impl Serialize for Bytes { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: Serializer { + serializer.serialize_str(&self.0.to_hex()) + } +} + +impl str::FromStr for Bytes { + type Err = FromHexError; + + fn from_str(s: &str) -> Result { + s.from_hex().map(Bytes) + } +} + +impl From<&'static str> for Bytes { + fn from(s: &'static str) -> Self { + s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) + } +} + +impl From> for Bytes { + fn from(v: Vec) -> Self { + Bytes(v) + } +} + +impl From for Vec { + fn from(b: Bytes) -> Self { + b.0 + } +} + diff --git a/ethstore/src/json/crypto.rs b/ethstore/src/json/crypto.rs index e6ecef81f..739a2fea9 100644 --- a/ethstore/src/json/crypto.rs +++ 
b/ethstore/src/json/crypto.rs @@ -16,12 +16,14 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer, Error}; use serde::de::{Visitor, MapVisitor}; -use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256}; +use super::{Cipher, CipherSer, CipherSerParams, Kdf, KdfSer, KdfSerParams, H256, Bytes}; + +pub type CipherText = Bytes; #[derive(Debug, PartialEq)] pub struct Crypto { pub cipher: Cipher, - pub ciphertext: H256, + pub ciphertext: CipherText, pub kdf: Kdf, pub mac: H256, } diff --git a/ethstore/src/json/hash.rs b/ethstore/src/json/hash.rs index 0079b4f81..25bf51130 100644 --- a/ethstore/src/json/hash.rs +++ b/ethstore/src/json/hash.rs @@ -14,9 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::fmt; -use std::ops; -use std::str::FromStr; +use std::{ops, fmt, str}; use rustc_serialize::hex::{FromHex, ToHex}; use serde::{Serialize, Serializer, Deserialize, Deserializer, Error as SerdeError}; use serde::de::Visitor; @@ -65,7 +63,7 @@ macro_rules! impl_hash { type Value = $name; fn visit_str(&mut self, value: &str) -> Result where E: SerdeError { - FromStr::from_str(value).map_err(SerdeError::custom) + value.parse().map_err(SerdeError::custom) } fn visit_string(&mut self, value: String) -> Result where E: SerdeError { @@ -77,7 +75,7 @@ macro_rules! impl_hash { } } - impl FromStr for $name { + impl str::FromStr for $name { type Err = Error; fn from_str(value: &str) -> Result { @@ -92,6 +90,12 @@ macro_rules! 
impl_hash { } } + impl From<&'static str> for $name { + fn from(s: &'static str) -> Self { + s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) + } + } + impl From<[u8; $size]> for $name { fn from(bytes: [u8; $size]) -> Self { $name(bytes) diff --git a/ethstore/src/json/id.rs b/ethstore/src/json/id.rs index 2e896458c..ff282a9f8 100644 --- a/ethstore/src/json/id.rs +++ b/ethstore/src/json/id.rs @@ -15,8 +15,7 @@ // along with Parity. If not, see . //! Universaly unique identifier. -use std::str::FromStr; -use std::fmt; +use std::{fmt, str}; use rustc_serialize::hex::{ToHex, FromHex}; use serde::{Deserialize, Serialize, Deserializer, Serializer, Error as SerdeError}; use serde::de::Visitor; @@ -73,7 +72,7 @@ fn copy_into(from: &str, into: &mut [u8]) -> Result<(), Error> { Ok(()) } -impl FromStr for UUID { +impl str::FromStr for UUID { type Err = Error; fn from_str(s: &str) -> Result { @@ -95,6 +94,12 @@ impl FromStr for UUID { } } +impl From<&'static str> for UUID { + fn from(s: &'static str) -> Self { + s.parse().expect(&format!("invalid string literal for {}: '{}'", stringify!(Self), s)) + } +} + impl Serialize for UUID { fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer { @@ -116,7 +121,7 @@ impl Visitor for UUIDVisitor { type Value = UUID; fn visit_str(&mut self, value: &str) -> Result where E: SerdeError { - UUID::from_str(value).map_err(SerdeError::custom) + value.parse().map_err(SerdeError::custom) } fn visit_string(&mut self, value: String) -> Result where E: SerdeError { @@ -126,19 +131,18 @@ impl Visitor for UUIDVisitor { #[cfg(test)] mod tests { - use std::str::FromStr; use super::UUID; #[test] fn uuid_from_str() { - let uuid = UUID::from_str("3198bc9c-6672-5ab3-d995-4942343ae5b6").unwrap(); + let uuid: UUID = "3198bc9c-6672-5ab3-d995-4942343ae5b6".into(); assert_eq!(uuid, UUID::from([0x31, 0x98, 0xbc, 0x9c, 0x66, 0x72, 0x5a, 0xb3, 0xd9, 0x95, 0x49, 0x42, 0x34, 0x3a, 0xe5, 0xb6])); } 
#[test] fn uuid_from_and_to_str() { let from = "3198bc9c-6672-5ab3-d995-4942343ae5b6"; - let uuid = UUID::from_str(from).unwrap(); + let uuid: UUID = from.into(); let to: String = uuid.into(); assert_eq!(from, &to); } diff --git a/ethstore/src/json/key_file.rs b/ethstore/src/json/key_file.rs index 7d970a15c..6e37c7c89 100644 --- a/ethstore/src/json/key_file.rs +++ b/ethstore/src/json/key_file.rs @@ -98,7 +98,7 @@ impl Visitor for KeyFileVisitor { Some(KeyFileField::Version) => { version = Some(try!(visitor.visit_value())); } Some(KeyFileField::Crypto) => { crypto = Some(try!(visitor.visit_value())); } Some(KeyFileField::Address) => { address = Some(try!(visitor.visit_value())); } - Some(KeyFileField::Name) => { name = visitor.visit_value().ok(); } // ignore anyhing that is not a string to be permissive. + Some(KeyFileField::Name) => { name = visitor.visit_value().ok(); } // ignore anyhing that is not a string to be permissive. Some(KeyFileField::Meta) => { meta = visitor.visit_value().ok(); } // ignore anyhing that is not a string to be permissive. 
None => { break; } } @@ -153,7 +153,7 @@ impl KeyFile { mod tests { use std::str::FromStr; use serde_json; - use json::{KeyFile, UUID, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt, H128, H160, H256}; + use json::{KeyFile, UUID, Version, Crypto, Cipher, Aes128Ctr, Kdf, Scrypt}; #[test] fn basic_keyfile() { @@ -185,20 +185,20 @@ mod tests { let expected = KeyFile { id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), version: Version::V3, - address: H160::from_str("6edddfc6349aff20bc6467ccf276c5b52487f7a8").unwrap(), + address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(), crypto: Crypto { cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: H128::from_str("b5a7ec855ec9e2c405371356855fec83").unwrap(), + iv: "b5a7ec855ec9e2c405371356855fec83".into(), }), - ciphertext: H256::from_str("7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc").unwrap(), + ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(), kdf: Kdf::Scrypt(Scrypt { n: 262144, dklen: 32, p: 1, r: 8, - salt: H256::from_str("1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209").unwrap(), + salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), }), - mac: H256::from_str("46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f").unwrap(), + mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), }, name: Some("Test".to_owned()), meta: Some("{}".to_owned()), @@ -234,22 +234,22 @@ mod tests { }"#; let expected = KeyFile { - id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), + id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(), version: Version::V3, - address: H160::from_str("6edddfc6349aff20bc6467ccf276c5b52487f7a8").unwrap(), + address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(), crypto: Crypto { cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: H128::from_str("b5a7ec855ec9e2c405371356855fec83").unwrap(), + iv: "b5a7ec855ec9e2c405371356855fec83".into(), }), - 
ciphertext: H256::from_str("7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc").unwrap(), + ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(), kdf: Kdf::Scrypt(Scrypt { n: 262144, dklen: 32, p: 1, r: 8, - salt: H256::from_str("1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209").unwrap(), + salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), }), - mac: H256::from_str("46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f").unwrap(), + mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), }, name: None, meta: None, @@ -262,22 +262,22 @@ mod tests { #[test] fn to_and_from_json() { let file = KeyFile { - id: UUID::from_str("8777d9f6-7860-4b9b-88b7-0b57ee6b3a73").unwrap(), + id: "8777d9f6-7860-4b9b-88b7-0b57ee6b3a73".into(), version: Version::V3, - address: H160::from_str("6edddfc6349aff20bc6467ccf276c5b52487f7a8").unwrap(), + address: "6edddfc6349aff20bc6467ccf276c5b52487f7a8".into(), crypto: Crypto { cipher: Cipher::Aes128Ctr(Aes128Ctr { - iv: H128::from_str("b5a7ec855ec9e2c405371356855fec83").unwrap(), + iv: "b5a7ec855ec9e2c405371356855fec83".into(), }), - ciphertext: H256::from_str("7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc").unwrap(), + ciphertext: "7203da0676d141b138cd7f8e1a4365f59cc1aa6978dc5443f364ca943d7cb4bc".into(), kdf: Kdf::Scrypt(Scrypt { n: 262144, dklen: 32, p: 1, r: 8, - salt: H256::from_str("1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209").unwrap(), + salt: "1e8642fdf1f87172492c1412fc62f8db75d796cdfa9c53c3f2b11e44a2a1b209".into(), }), - mac: H256::from_str("46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f").unwrap(), + mac: "46325c5d4e8c991ad2683d525c7854da387138b6ca45068985aa4959fa2b8c8f".into(), }, name: Some("Test".to_owned()), meta: None, diff --git a/ethstore/src/json/mod.rs.in b/ethstore/src/json/mod.rs.in index 4f9fdbfe3..133d9821e 100644 --- 
a/ethstore/src/json/mod.rs.in +++ b/ethstore/src/json/mod.rs.in @@ -1,3 +1,4 @@ +mod bytes; mod cipher; mod crypto; mod error; @@ -8,8 +9,9 @@ mod key_file; mod presale; mod version; +pub use self::bytes::Bytes; pub use self::cipher::{Cipher, CipherSer, CipherSerParams, Aes128Ctr}; -pub use self::crypto::Crypto; +pub use self::crypto::{Crypto, CipherText}; pub use self::error::Error; pub use self::hash::{H128, H160, H256}; pub use self::id::UUID; diff --git a/ethstore/src/json/presale.rs b/ethstore/src/json/presale.rs index 77394fcb1..d1cffcb6a 100644 --- a/ethstore/src/json/presale.rs +++ b/ethstore/src/json/presale.rs @@ -1,30 +1,8 @@ use std::io::Read; -use std::ops::Deref; use serde_json; -use serde::{Deserialize, Deserializer, Error}; -use rustc_serialize::hex::FromHex; -use super::{H160}; +use super::{H160, Bytes}; -#[derive(Debug, PartialEq)] -pub struct Encseed(Vec); - -impl Deref for Encseed { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Deserialize for Encseed { - fn deserialize(deserializer: &mut D) -> Result - where D: Deserializer - { - let s = try!(String::deserialize(deserializer)); - let data = try!(s.from_hex().map_err(|e| Error::custom(format!("Invalid hex value {}", e)))); - Ok(Encseed(data)) - } -} +pub type Encseed = Bytes; #[derive(Debug, PartialEq, Deserialize)] pub struct PresaleWallet { @@ -43,8 +21,7 @@ impl PresaleWallet { mod tests { use std::str::FromStr; use serde_json; - use rustc_serialize::hex::FromHex; - use json::{PresaleWallet, H160, Encseed}; + use json::{PresaleWallet, H160}; #[test] fn presale_wallet() { @@ -57,7 +34,7 @@ mod tests { } "#; let expected = PresaleWallet { - encseed: Encseed("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".from_hex().unwrap()), + encseed: 
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066".into(), address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(), }; @@ -77,7 +54,7 @@ mod tests { } "#; let expected = PresaleWallet { - encseed: Encseed("137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".from_hex().unwrap()), + encseed: 
"137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dedb3bc0a9ac6c79b9c426c5878ca2c9d06ff42a23cb648312fc32ba83649de0928e066137103c28caeebbcea5d7f95edb97a289ded151b72159137cb7b2671f394f54cff8c121589dcb373e267225547b3c71cbdb54f6e48ec85cd549f96cf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0dcf0d".into(), address: H160::from_str("ede84640d1a1d3e06902048e67aa7db8d52c2ce1").unwrap(), }; diff --git a/ethstore/tests/api.rs b/ethstore/tests/api.rs index 83aa04874..e1667607b 100644 --- a/ethstore/tests/api.rs +++ b/ethstore/tests/api.rs @@ -19,9 +19,8 @@ extern crate ethstore; mod util; -use std::str::FromStr; use ethstore::{SecretStore, EthStore}; -use ethstore::ethkey::{Random, Generator, Secret, Address}; +use ethstore::ethkey::{Random, Generator, Secret, KeyPair, verify_address}; use ethstore::dir::DiskDirectory; use util::TransientDir; @@ -103,14 +102,21 @@ fn pat_path() -> &'static str { } } +fn ciphertext_path() -> &'static str { + match ::std::fs::metadata("ethstore") { + Ok(_) => "ethstore/tests/res/ciphertext", + Err(_) => 
"tests/res/ciphertext", + } +} + #[test] fn secret_store_laod_geth_files() { let dir = DiskDirectory::at(test_path()); let store = EthStore::open(Box::new(dir)).unwrap(); assert_eq!(store.accounts().unwrap(), vec![ - Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap(), - Address::from_str("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf").unwrap(), - Address::from_str("63121b431a52f8043c16fcf0d1df9cb7b5f66649").unwrap(), + "3f49624084b67849c7b4e805c5988c21a430f9d9".into(), + "5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into(), + "63121b431a52f8043c16fcf0d1df9cb7b5f66649".into(), ]); } @@ -119,9 +125,30 @@ fn secret_store_load_pat_files() { let dir = DiskDirectory::at(pat_path()); let store = EthStore::open(Box::new(dir)).unwrap(); assert_eq!(store.accounts().unwrap(), vec![ - Address::from_str("3f49624084b67849c7b4e805c5988c21a430f9d9").unwrap(), - Address::from_str("5ba4dcf897e97c2bdf8315b9ef26c13c085988cf").unwrap(), + "3f49624084b67849c7b4e805c5988c21a430f9d9".into(), + "5ba4dcf897e97c2bdf8315b9ef26c13c085988cf".into(), ]); } +#[test] +fn test_decrypting_files_with_short_ciphertext() { + // 31e9d1e6d844bd3a536800ef8d8be6a9975db509, 30 + let kp1 = KeyPair::from_secret("000081c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018".into()).unwrap(); + // d1e64e5480bfaf733ba7d48712decb8227797a4e , 31 + let kp2 = KeyPair::from_secret("00fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35".into()).unwrap(); + let dir = DiskDirectory::at(ciphertext_path()); + let store = EthStore::open(Box::new(dir)).unwrap(); + let accounts = store.accounts().unwrap(); + assert_eq!(accounts, vec![ + "31e9d1e6d844bd3a536800ef8d8be6a9975db509".into(), + "d1e64e5480bfaf733ba7d48712decb8227797a4e".into(), + ]); + let message = Default::default(); + + let s1 = store.sign(&accounts[0], "foo", &message).unwrap(); + let s2 = store.sign(&accounts[1], "foo", &message).unwrap(); + assert!(verify_address(&accounts[0], &s1, &message).unwrap()); + 
assert!(verify_address(&kp1.address(), &s1, &message).unwrap()); + assert!(verify_address(&kp2.address(), &s2, &message).unwrap()); +} diff --git a/ethstore/tests/res/ciphertext/30.json b/ethstore/tests/res/ciphertext/30.json new file mode 100644 index 000000000..c4f5ad184 --- /dev/null +++ b/ethstore/tests/res/ciphertext/30.json @@ -0,0 +1,21 @@ +{ + "address" : "31e9d1e6d844bd3a536800ef8d8be6a9975db509", + "crypto" : { + "cipher" : "aes-128-ctr", + "cipherparams" : { + "iv" : "3ca92af36ad7c2cd92454c59cea5ef00" + }, + "ciphertext" : "108b7d34f3442fc26ab1ab90ca91476ba6bfa8c00975a49ef9051dc675aa", + "kdf" : "scrypt", + "kdfparams" : { + "dklen" : 32, + "n" : 2, + "r" : 8, + "p" : 1, + "salt" : "d0769e608fb86cda848065642a9c6fa046845c928175662b8e356c77f914cd3b" + }, + "mac" : "75d0e6759f7b3cefa319c3be41680ab6beea7d8328653474bd06706d4cc67420" + }, + "id" : "a37e1559-5955-450d-8075-7b8931b392b2", + "version" : 3 +} diff --git a/ethstore/tests/res/ciphertext/31.json b/ethstore/tests/res/ciphertext/31.json new file mode 100644 index 000000000..9c2612b03 --- /dev/null +++ b/ethstore/tests/res/ciphertext/31.json @@ -0,0 +1,21 @@ +{ + "address" : "d1e64e5480bfaf733ba7d48712decb8227797a4e", + "crypto" : { + "cipher" : "aes-128-ctr", + "cipherparams" : { + "iv" : "e0c41130a323adc1446fc82f724bca2f" + }, + "ciphertext" : "9517cd5bdbe69076f9bf5057248c6c050141e970efa36ce53692d5d59a3984", + "kdf" : "scrypt", + "kdfparams" : { + "dklen" : 32, + "n" : 2, + "r" : 8, + "p" : 1, + "salt" : "711f816911c92d649fb4c84b047915679933555030b3552c1212609b38208c63" + }, + "mac" : "d5e116151c6aa71470e67a7d42c9620c75c4d23229847dcc127794f0732b0db5" + }, + "id" : "fecfc4ce-e956-48fd-953b-30f8b52ed66c", + "version" : 3 +} diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 94684129c..bc24afa1e 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -123,7 +123,6 @@ impl Args { } } - fn die(msg: &'static str) -> ! 
{ println!("{}", msg); ::std::process::exit(-1) diff --git a/ipc/codegen/src/codegen.rs b/ipc/codegen/src/codegen.rs index 89bd9548c..9caa436bc 100644 --- a/ipc/codegen/src/codegen.rs +++ b/ipc/codegen/src/codegen.rs @@ -49,7 +49,7 @@ pub fn expand_ipc_implementation( let item = match *annotatable { Annotatable::Item(ref item) => item, _ => { - cx.span_err(meta_item.span, "`#[derive(Ipc)]` may only be applied to struct implementations"); + cx.span_err(meta_item.span, "`#[ipc]` may only be applied to implementations and traits"); return; }, }; @@ -832,7 +832,7 @@ fn implement_interface( _ => { cx.span_err( item.span, - "`#[derive(Ipc)]` may only be applied to implementations and traits"); + "`#[ipc]` may only be applied to implementations and traits"); return Err(Error); }, }; diff --git a/ipc/codegen/src/lib.rs b/ipc/codegen/src/lib.rs index ce1ca8592..dc58c6a8a 100644 --- a/ipc/codegen/src/lib.rs +++ b/ipc/codegen/src/lib.rs @@ -56,7 +56,7 @@ pub fn expand(src: &std::path::Path, dst: &std::path::Path) { } #[cfg(feature = "with-syntex")] -pub fn register(reg: &mut syntex::Registry) { +pub fn register_cleaner(reg: &mut syntex::Registry) { use syntax::{ast, fold}; #[cfg(feature = "with-syntex")] @@ -66,6 +66,7 @@ pub fn register(reg: &mut syntex::Registry) { fn fold_attribute(&mut self, attr: ast::Attribute) -> Option { match attr.node.value.node { ast::MetaItemKind::List(ref n, _) if n == &"ipc" => { return None; } + ast::MetaItemKind::Word(ref n) if n == &"ipc" => { return None; } _ => {} } @@ -80,19 +81,24 @@ pub fn register(reg: &mut syntex::Registry) { fold::Folder::fold_crate(&mut StripAttributeFolder, krate) } + reg.add_post_expansion_pass(strip_attributes); +} + +#[cfg(feature = "with-syntex")] +pub fn register(reg: &mut syntex::Registry) { reg.add_attr("feature(custom_derive)"); reg.add_attr("feature(custom_attribute)"); - reg.add_decorator("derive_Ipc", codegen::expand_ipc_implementation); + reg.add_decorator("ipc", codegen::expand_ipc_implementation); 
reg.add_decorator("derive_Binary", serialization::expand_serialization_implementation); - reg.add_post_expansion_pass(strip_attributes); + register_cleaner(reg); } #[cfg(not(feature = "with-syntex"))] pub fn register(reg: &mut rustc_plugin::Registry) { reg.register_syntax_extension( - syntax::parse::token::intern("derive_Ipc"), + syntax::parse::token::intern("ipc"), syntax::ext::base::MultiDecorator( Box::new(codegen::expand_ipc_implementation))); reg.register_syntax_extension( @@ -104,7 +110,34 @@ pub fn register(reg: &mut rustc_plugin::Registry) { } #[derive(Debug)] -pub enum Error { InvalidFileName, ExpandFailure } +pub enum Error { InvalidFileName, ExpandFailure, Io(std::io::Error) } + +impl std::convert::From for Error { + fn from(err: std::io::Error) -> Self { + Error::Io(err) + } +} + +pub fn derive_ipc_cond(src_path: &str, has_feature: bool) -> Result<(), Error> { + if has_feature { derive_ipc(src_path) } + else { cleanup_ipc(src_path) } +} + +pub fn cleanup_ipc(src_path: &str) -> Result<(), Error> { + use std::env; + use std::path::{Path, PathBuf}; + + let out_dir = env::var_os("OUT_DIR").unwrap(); + let file_name = try!(PathBuf::from(src_path).file_name().ok_or(Error::InvalidFileName).map(|val| val.to_str().unwrap().to_owned())); + let mut registry = syntex::Registry::new(); + register_cleaner(&mut registry); + if let Err(_) = registry.expand("", &Path::new(src_path), &Path::new(&out_dir).join(&file_name)) + { + // will be reported by compiler + return Err(Error::ExpandFailure) + } + Ok(()) +} pub fn derive_ipc(src_path: &str) -> Result<(), Error> { use std::env; @@ -113,11 +146,11 @@ pub fn derive_ipc(src_path: &str) -> Result<(), Error> { let out_dir = env::var_os("OUT_DIR").unwrap(); let file_name = try!(PathBuf::from(src_path).file_name().ok_or(Error::InvalidFileName).map(|val| val.to_str().unwrap().to_owned())); + let final_path = Path::new(&out_dir).join(&file_name); + let mut intermediate_file_name = file_name.clone(); 
intermediate_file_name.push_str(".rpc.in"); - let intermediate_path = Path::new(&out_dir).join(&intermediate_file_name); - let final_path = Path::new(&out_dir).join(&file_name); { let mut registry = syntex::Registry::new(); diff --git a/ipc/hypervisor/Cargo.toml b/ipc/hypervisor/Cargo.toml index a4c462bd0..d730b9bcf 100644 --- a/ipc/hypervisor/Cargo.toml +++ b/ipc/hypervisor/Cargo.toml @@ -13,6 +13,7 @@ nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" } ethcore-ipc-nano = { path = "../nano" } semver = "0.2" log = "0.3" +time = "0.1" [build-dependencies] ethcore-ipc-codegen = { path = "../codegen" } diff --git a/ipc/hypervisor/src/lib.rs b/ipc/hypervisor/src/lib.rs index 78b8b04ce..c7543ca91 100644 --- a/ipc/hypervisor/src/lib.rs +++ b/ipc/hypervisor/src/lib.rs @@ -22,6 +22,7 @@ extern crate ethcore_ipc as ipc; extern crate ethcore_ipc_nano as nanoipc; extern crate semver; #[macro_use] extern crate log; +extern crate time; pub mod service; @@ -187,23 +188,40 @@ impl Hypervisor { } /// Waits for every required module to check in - pub fn wait_for_shutdown(&self) { + pub fn wait_for_shutdown(&self) -> bool { + use time::{PreciseTime, Duration}; + let mut worker = self.ipc_worker.write().unwrap(); + let start = PreciseTime::now(); while !self.modules_shutdown() { - worker.poll() + worker.poll(); + if start.to(PreciseTime::now()) > Duration::seconds(30) { + warn!("Some modules failed to shutdown gracefully, they will be terminated."); + break; + } } + self.modules_shutdown() } /// Shutdown the ipc and all managed child processes pub fn shutdown(&self) { let mut childs = self.processes.write().unwrap(); - for (ref mut module, _) in childs.iter_mut() { + for (ref module, _) in childs.iter() { trace!(target: "hypervisor", "Stopping process module: {}", module); self.service.send_shutdown(**module); } trace!(target: "hypervisor", "Waiting for shutdown..."); - self.wait_for_shutdown(); - trace!(target: "hypervisor", "All modules reported shutdown"); + if 
self.wait_for_shutdown() { + trace!(target: "hypervisor", "All modules reported shutdown"); + return; + } + + for (ref module, ref mut process) in childs.iter_mut() { + if self.service.is_running(**module) { + process.kill().unwrap(); + trace!("Terminated {}", module); + } + } } } diff --git a/ipc/hypervisor/src/service.rs.in b/ipc/hypervisor/src/service.rs.in index 74d289f50..e80a1ec30 100644 --- a/ipc/hypervisor/src/service.rs.in +++ b/ipc/hypervisor/src/service.rs.in @@ -39,13 +39,12 @@ pub struct ModuleState { shutdown: bool, } - -#[derive(Ipc)] +#[ipc] pub trait ControlService { fn shutdown(&self) -> bool; } -#[derive(Ipc)] +#[ipc] impl HypervisorService { // return type for making method synchronous fn module_ready(&self, module_id: u64, control_url: String) -> bool { @@ -106,6 +105,10 @@ impl HypervisorService { self.modules.read().unwrap().iter().filter(|&(_, module)| module.started && !module.shutdown).count() } + pub fn is_running(&self, id: IpcModuleId) -> bool { + self.modules.read().unwrap().get(&id).map(|module| module.started && !module.shutdown).unwrap_or(false) + } + pub fn send_shutdown(&self, module_id: IpcModuleId) { let modules = self.modules.read().unwrap(); modules.get(&module_id).map(|module| { diff --git a/ipc/tests/nested.rs.in b/ipc/tests/nested.rs.in index 4f0ac4a8a..df0c9bde3 100644 --- a/ipc/tests/nested.rs.in +++ b/ipc/tests/nested.rs.in @@ -33,7 +33,7 @@ impl IpcConfig for DBWriter {} #[derive(Binary)] pub enum DBError { Write, Read } -#[derive(Ipc)] +#[ipc] impl DBWriter for DB { fn write(&self, data: Vec) -> Result<(), DBError> { let mut writes = self.writes.write().unwrap(); @@ -48,7 +48,7 @@ impl DBWriter for DB { } } -#[derive(Ipc)] +#[ipc] trait DBNotify { fn notify(&self, a: u64, b: u64) -> bool; } diff --git a/ipc/tests/service.rs.in b/ipc/tests/service.rs.in index 9c221d481..cd9a5a6b2 100644 --- a/ipc/tests/service.rs.in +++ b/ipc/tests/service.rs.in @@ -28,7 +28,7 @@ pub struct CustomData { pub b: u64, } -#[derive(Ipc)] 
+#[ipc] impl Service { fn commit(&self, f: u32) -> u32 { let mut lock = self.commits.write().unwrap(); diff --git a/ipc/tests/with_attrs.rs.in b/ipc/tests/with_attrs.rs.in index bbf5b894a..f65627fce 100644 --- a/ipc/tests/with_attrs.rs.in +++ b/ipc/tests/with_attrs.rs.in @@ -18,7 +18,6 @@ use ipc::IpcConfig; pub struct BadlyNamedService; -#[derive(Ipc)] #[ipc(client_ident="PrettyNamedClient")] impl BadlyNamedService { fn is_zero(&self, x: u64) -> bool { diff --git a/json/src/spec/ethash.rs b/json/src/spec/ethash.rs index 10d4f84ee..d20ab3992 100644 --- a/json/src/spec/ethash.rs +++ b/json/src/spec/ethash.rs @@ -32,6 +32,9 @@ pub struct EthashParams { #[serde(rename="difficultyBoundDivisor")] pub difficulty_bound_divisor: Uint, /// See main EthashParams docs. + #[serde(rename="difficultyIncrementDivisor")] + pub difficulty_increment_divisor: Option, + /// See main EthashParams docs. #[serde(rename="durationLimit")] pub duration_limit: Uint, /// See main EthashParams docs. @@ -39,9 +42,11 @@ pub struct EthashParams { pub block_reward: Uint, /// See main EthashParams docs. pub registrar: Option
, + /// See main EthashParams docs. #[serde(rename="frontierCompatibilityModeLimit")] pub frontier_compatibility_mode_limit: Option, + /// See main EthashParams docs. #[serde(rename="daoHardforkTransition")] pub dao_hardfork_transition: Option, @@ -51,6 +56,16 @@ pub struct EthashParams { /// See main EthashParams docs. #[serde(rename="daoHardforkAccounts")] pub dao_hardfork_accounts: Option>, + + /// See main EthashParams docs. + #[serde(rename="difficultyHardforkTransition")] + pub difficulty_hardfork_transition: Option, + /// See main EthashParams docs. + #[serde(rename="difficultyHardforkBoundDivisor")] + pub difficulty_hardfork_bound_divisor: Option, + /// See main EthashParams docs. + #[serde(rename="bombDefuseTransition")] + pub bomb_defuse_transition: Option, } /// Ethash engine deserialization. @@ -99,7 +114,10 @@ mod tests { "0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97", "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", "0x807640a13483f8ac783c557fcdf27be11ea4ac7a" - ] + ], + "difficultyHardforkTransition": "0x59d9", + "difficultyHardforkBoundDivisor": "0x0200", + "bombDefuseTransition": "0x42" } }"#; diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index 62c63d6b5..676feff93 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -31,6 +31,9 @@ pub struct Params { /// Network id. #[serde(rename="networkID")] pub network_id: Uint, + /// Name of the main ("eth") subprotocol. + #[serde(rename="subprotocolName")] + pub subprotocol_name: Option, /// Minimum gas limit. 
#[serde(rename="minGasLimit")] pub min_gas_limit: Uint, @@ -53,6 +56,7 @@ mod tests { "frontierCompatibilityModeLimit": "0x118c30", "maximumExtraDataSize": "0x20", "networkID" : "0x1", + "subprotocolName" : "exp", "minGasLimit": "0x1388", "accountStartNonce": "0x00" }"#; diff --git a/logger/src/lib.rs b/logger/src/lib.rs index e672a3e28..79655d2f6 100644 --- a/logger/src/lib.rs +++ b/logger/src/lib.rs @@ -91,10 +91,10 @@ pub fn setup_log(config: &Config) -> Result, String> { let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); let with_color = if max_log_level() <= LogLevelFilter::Info { - format!("{}{}", Colour::Black.bold().paint(timestamp), record.args()) + format!("{} {}", Colour::Black.bold().paint(timestamp), record.args()) } else { let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); - format!("{}{} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args()) + format!("{} {} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args()) }; let removed_color = kill_color(with_color.as_ref()); diff --git a/parity/account.rs b/parity/account.rs index 26a974090..9d400cab5 100644 --- a/parity/account.rs +++ b/parity/account.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use ethcore::ethstore::{EthStore, import_accounts}; +use ethcore::ethstore::{EthStore, SecretStore, import_accounts, read_geth_accounts}; use ethcore::ethstore::dir::DiskDirectory; use ethcore::account_provider::AccountProvider; use helpers::{password_prompt, password_from_file}; @@ -24,6 +24,7 @@ pub enum AccountCmd { New(NewAccount), List(String), Import(ImportAccounts), + ImportFromGeth(ImportFromGethAccounts) } #[derive(Debug, PartialEq)] @@ -39,11 +40,21 @@ pub struct ImportAccounts { pub to: String, } +/// Parameters for geth accounts' import +#[derive(Debug, PartialEq)] +pub struct ImportFromGethAccounts { + /// import mainnet (false) or testnet (true) accounts + pub testnet: bool, + /// directory to import accounts to + pub to: String, +} + pub fn execute(cmd: AccountCmd) -> Result { match cmd { AccountCmd::New(new_cmd) => new(new_cmd), AccountCmd::List(path) => list(path), AccountCmd::Import(import_cmd) => import(import_cmd), + AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd) } } @@ -51,6 +62,13 @@ fn keys_dir(path: String) -> Result { DiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e)) } +fn secret_store(dir: Box, iterations: Option) -> Result { + match iterations { + Some(i) => EthStore::open_with_iterations(dir, i), + _ => EthStore::open(dir) + }.map_err(|e| format!("Could not open keys store: {}", e)) +} + fn new(n: NewAccount) -> Result { let password: String = match n.password_file { Some(file) => try!(password_from_file(file)), @@ -58,7 +76,7 @@ fn new(n: NewAccount) -> Result { }; let dir = Box::new(try!(keys_dir(n.path))); - let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap()); + let secret_store = Box::new(try!(secret_store(dir, Some(n.iterations)))); let acc_provider = AccountProvider::new(secret_store); let new_account = try!(acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e))); Ok(format!("{:?}", 
new_account)) @@ -66,7 +84,7 @@ fn new(n: NewAccount) -> Result { fn list(path: String) -> Result { let dir = Box::new(try!(keys_dir(path))); - let secret_store = Box::new(EthStore::open(dir).unwrap()); + let secret_store = Box::new(try!(secret_store(dir, None))); let acc_provider = AccountProvider::new(secret_store); let accounts = acc_provider.accounts(); let result = accounts.into_iter() @@ -86,3 +104,17 @@ fn import(i: ImportAccounts) -> Result { } Ok(format!("{}", imported)) } + +fn import_geth(i: ImportFromGethAccounts) -> Result { + use std::io::ErrorKind; + use ethcore::ethstore::Error; + + let dir = Box::new(try!(keys_dir(i.to))); + let secret_store = Box::new(try!(secret_store(dir, None))); + let geth_accounts = read_geth_accounts(i.testnet); + match secret_store.import_geth_accounts(geth_accounts, i.testnet) { + Ok(v) => Ok(format!("Successfully imported {} account(s) from geth.", v.len())), + Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => Err("Failed to find geth keys folder.".into()), + Err(err) => Err(format!("Import geth accounts failed. 
{}", err)) + } +} diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 3dfdac804..d4a4d8217 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -30,8 +30,8 @@ use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, use ethcore::error::ImportError; use ethcore::miner::Miner; use cache::CacheConfig; -use params::{SpecType, Pruning, Switch, tracing_switch_to_bool}; use informant::{Informant, MillisecondDuration}; +use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; use io_handler::ImportIoHandler; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; @@ -81,6 +81,7 @@ pub struct ImportBlockchain { pub wal: bool, pub mode: Mode, pub tracing: Switch, + pub fat_db: Switch, pub vm_type: VMType, } @@ -96,6 +97,7 @@ pub struct ExportBlockchain { pub compaction: DatabaseCompactionProfile, pub wal: bool, pub mode: Mode, + pub fat_db: Switch, pub tracing: Switch, pub from_block: BlockID, pub to_block: BlockID, @@ -135,14 +137,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result { // load user defaults let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); - // check if tracing is on - let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); - fdlimit::raise_fd_limit(); // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); + // prepare client and snapshot paths. 
let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); @@ -151,7 +156,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result { try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm); // build client let service = try!(ClientService::start( @@ -283,14 +288,17 @@ fn execute_export(cmd: ExportBlockchain) -> Result { // load user defaults let user_defaults = try!(UserDefaults::load(&user_defaults_path)); - // check if tracing is on - let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); - fdlimit::raise_fd_limit(); // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); + // prepare client and snapshot paths. 
let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); @@ -299,7 +307,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result { try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index ec5dfbe35..2363f1740 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -67,7 +67,8 @@ usd_per_eth = "auto" price_update_period = "hourly" gas_floor_target = "4700000" gas_cap = "6283184" -tx_queue_size = 1024 +tx_queue_size = 2048 +tx_queue_gas = "auto" tx_gas_limit = "6283184" extra_data = "Parity" remove_solved = false @@ -82,7 +83,7 @@ cache_size_queue = 50 cache_size = 128 # Overrides above caches with total size fast_and_loose = false db_compaction = "ssd" -fat_db = false +fat_db = "auto" [snapshots] disable_periodic = false diff --git a/parity/cli/config.toml b/parity/cli/config.toml index 11ec333aa..4ab691679 100644 --- a/parity/cli/config.toml +++ b/parity/cli/config.toml @@ -41,6 +41,7 @@ reseal_on_txs = "all" reseal_min_period = 4000 price_update_period = "hourly" tx_queue_size = 2048 +tx_queue_gas = "auto" [footprint] tracing = "on" @@ -49,7 +50,7 @@ cache_size_db = 128 cache_size_blocks = 16 cache_size_queue = 100 db_compaction = "ssd" -fat_db = true +fat_db = "off" [snapshots] disable_periodic = true diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index b8b10ec1d..10348b21b 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -193,8 +193,10 @@ usage! 
{ or |c: &Config| otry!(c.mining).gas_cap.clone(), flag_extra_data: Option = None, or |c: &Config| otry!(c.mining).extra_data.clone().map(Some), - flag_tx_queue_size: usize = 1024usize, + flag_tx_queue_size: usize = 2048usize, or |c: &Config| otry!(c.mining).tx_queue_size.clone(), + flag_tx_queue_gas: String = "auto", + or |c: &Config| otry!(c.mining).tx_queue_gas.clone(), flag_remove_solved: bool = false, or |c: &Config| otry!(c.mining).remove_solved.clone(), flag_notify_work: Option = None, @@ -217,7 +219,7 @@ usage! { or |c: &Config| otry!(c.footprint).fast_and_loose.clone(), flag_db_compaction: String = "ssd", or |c: &Config| otry!(c.footprint).db_compaction.clone(), - flag_fat_db: bool = false, + flag_fat_db: String = "auto", or |c: &Config| otry!(c.footprint).fat_db.clone(), // -- Import/Export Options @@ -348,6 +350,7 @@ struct Mining { gas_cap: Option, extra_data: Option, tx_queue_size: Option, + tx_queue_gas: Option, remove_solved: Option, notify_work: Option>, } @@ -362,7 +365,7 @@ struct Footprint { cache_size_blocks: Option, cache_size_queue: Option, db_compaction: Option, - fat_db: Option, + fat_db: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -522,7 +525,8 @@ mod tests { flag_gas_floor_target: "4700000".into(), flag_gas_cap: "6283184".into(), flag_extra_data: Some("Parity".into()), - flag_tx_queue_size: 1024usize, + flag_tx_queue_size: 2048usize, + flag_tx_queue_gas: "auto".into(), flag_remove_solved: false, flag_notify_work: Some("http://localhost:3001".into()), @@ -535,7 +539,7 @@ mod tests { flag_cache_size: Some(128), flag_fast_and_loose: false, flag_db_compaction: "ssd".into(), - flag_fat_db: false, + flag_fat_db: "auto".into(), // -- Import/Export Options flag_from: "1".into(), @@ -673,6 +677,7 @@ mod tests { gas_floor_target: None, gas_cap: None, tx_queue_size: Some(2048), + tx_queue_gas: Some("auto".into()), tx_gas_limit: None, extra_data: None, remove_solved: None, @@ -687,7 +692,7 @@ mod tests { cache_size_blocks: 
Some(16), cache_size_queue: Some(100), db_compaction: Some("ssd".into()), - fat_db: Some(true), + fat_db: Some("off".into()), }), snapshots: Some(Snapshots { disable_periodic: Some(true), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index a94f55a8d..ca75c9ee0 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -31,8 +31,8 @@ Operating Options: (default: {flag_mode_alarm}). --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file or olympic, frontier, - homestead, mainnet, morden, classic or testnet - (default: {flag_chain}). + homestead, mainnet, morden, classic, expanse or + testnet (default: {flag_chain}). -d --db-path PATH Specify the database & configuration directory path (default: {flag_db_path}). --keys-path PATH Specify the path for JSON key files to be found @@ -44,7 +44,8 @@ Account Options: ACCOUNTS is a comma-delimited list of addresses. Implies --no-signer. (default: {flag_unlock:?}) --password FILE Provide a file containing a password for unlocking - an account. (default: {flag_password:?}) + an account. Leading and trailing whitespace is trimmed. + (default: {flag_password:?}) --keys-iterations NUM Specify the number of iterations to use when deriving key from the password (bigger is more secure) (default: {flag_keys_iterations}). @@ -183,6 +184,10 @@ Sealing/Mining Options: more than 32 characters. (default: {flag_extra_data:?}) --tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting to be included in next block) (default: {flag_tx_queue_size}). + --tx-queue-gas LIMIT Maximum amount of total gas for external transactions in + the queue. LIMIT can be either an amount of gas or + 'auto' or 'off'. 'auto' sets the limit to be 2x + the current block gas limit. (default: {flag_tx_queue_gas}). --remove-solved Move solved blocks from the work package queue instead of cloning them. 
This gives a slightly faster import speed, but means that extra solutions @@ -217,7 +222,10 @@ Footprint Options: --db-compaction TYPE Database compaction type. TYPE may be one of: ssd - suitable for SSDs and fast HDDs; hdd - suitable for slow HDDs (default: {flag_db_compaction}). - --fat-db Fat database. (default: {flag_fat_db}) + --fat-db BOOL Build appropriate information to allow enumeration + of all accounts and storage keys. Doubles the size + of the state database. BOOL may be one of on, off + or auto. (default: {flag_fat_db}) Import/Export Options: --from BLOCK Export from block BLOCK, which may be an index or diff --git a/parity/configuration.rs b/parity/configuration.rs index 1aa338c26..56952219c 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -30,8 +30,8 @@ use rpc::{IpcConfiguration, HttpConfiguration}; use ethcore_rpc::NetworkSettings; use cache::CacheConfig; use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, -geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address}; -use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, SpecType}; +geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address, to_gas_limit}; +use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras}; use ethcore_logger::Config as LogConfig; use dir::Directories; use dapps::Configuration as DappsConfiguration; @@ -39,7 +39,7 @@ use signer::Configuration as SignerConfiguration; use run::RunCmd; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat}; use presale::ImportWallet; -use account::{AccountCmd, NewAccount, ImportAccounts}; +use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts}; use snapshot::{self, SnapshotCommand}; #[derive(Debug, PartialEq)] @@ -84,6 +84,7 @@ impl Configuration { let cache_config = self.cache_config(); let spec = try!(self.chain().parse()); let tracing = 
try!(self.args.flag_tracing.parse()); + let fat_db = try!(self.args.flag_fat_db.parse()); let compaction = try!(self.args.flag_db_compaction.parse()); let wal = !self.args.flag_fast_and_loose; let enable_network = self.enable_network(&mode); @@ -119,6 +120,14 @@ impl Configuration { unreachable!(); }; Cmd::Account(account_cmd) + } else if self.args.flag_import_geth_keys { + let account_cmd = AccountCmd::ImportFromGeth( + ImportFromGethAccounts { + to: dirs.keys, + testnet: self.args.flag_testnet + } + ); + Cmd::Account(account_cmd) } else if self.args.cmd_wallet { let presale_cmd = ImportWallet { iterations: self.args.flag_keys_iterations, @@ -140,6 +149,7 @@ impl Configuration { wal: wal, mode: mode, tracing: tracing, + fat_db: fat_db, vm_type: vm_type, }; Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) @@ -156,6 +166,7 @@ impl Configuration { wal: wal, mode: mode, tracing: tracing, + fat_db: fat_db, from_block: try!(to_block_id(&self.args.flag_from)), to_block: try!(to_block_id(&self.args.flag_to)), }; @@ -169,6 +180,7 @@ impl Configuration { logger_config: logger_config, mode: mode, tracing: tracing, + fat_db: fat_db, compaction: compaction, file_path: self.args.arg_file.clone(), wal: wal, @@ -185,6 +197,7 @@ impl Configuration { logger_config: logger_config, mode: mode, tracing: tracing, + fat_db: fat_db, compaction: compaction, file_path: self.args.arg_file.clone(), wal: wal, @@ -216,6 +229,7 @@ impl Configuration { miner_extras: try!(self.miner_extras()), mode: mode, tracing: tracing, + fat_db: fat_db, compaction: compaction, wal: wal, vm_type: vm_type, @@ -313,7 +327,6 @@ impl Configuration { fn accounts_config(&self) -> Result { let cfg = AccountsConfig { iterations: self.args.flag_keys_iterations, - import_keys: self.args.flag_import_geth_keys, testnet: self.args.flag_testnet, password_files: self.args.flag_password.clone(), unlocked_accounts: try!(to_addresses(&self.args.flag_unlock)), @@ -335,6 +348,7 @@ impl Configuration { None => U256::max_value(), 
}, tx_queue_size: self.args.flag_tx_queue_size, + tx_queue_gas_limit: try!(to_gas_limit(&self.args.flag_tx_queue_gas)), pending_set: try!(to_pending_set(&self.args.flag_relay_set)), reseal_min_period: Duration::from_millis(self.args.flag_reseal_min_period), work_queue_size: self.args.flag_work_queue_size, @@ -445,23 +459,12 @@ impl Configuration { ret.min_peers = self.min_peers(); let mut net_path = PathBuf::from(self.directories().db); net_path.push("network"); - let net_specific_path = net_path.join(&try!(self.network_specific_path())); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); - ret.net_config_path = Some(net_specific_path.to_str().unwrap().to_owned()); ret.reserved_nodes = try!(self.init_reserved_nodes()); ret.allow_non_reserved = !self.args.flag_reserved_only; Ok(ret) } - fn network_specific_path(&self) -> Result { - let spec_type : SpecType = try!(self.chain().parse()); - let spec = try!(spec_type.spec()); - let id = try!(self.network_id()); - let mut path = PathBuf::new(); - path.push(format!("{}", id.unwrap_or_else(|| spec.network_id()))); - Ok(path) - } - fn network_id(&self) -> Result, String> { let net_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()); match net_id { @@ -717,6 +720,7 @@ mod tests { wal: true, mode: Default::default(), tracing: Default::default(), + fat_db: Default::default(), vm_type: VMType::Interpreter, }))); } @@ -737,6 +741,7 @@ mod tests { wal: true, mode: Default::default(), tracing: Default::default(), + fat_db: Default::default(), from_block: BlockID::Number(1), to_block: BlockID::Latest, }))); @@ -758,6 +763,7 @@ mod tests { wal: true, mode: Default::default(), tracing: Default::default(), + fat_db: Default::default(), from_block: BlockID::Number(1), to_block: BlockID::Latest, }))); @@ -804,6 +810,7 @@ mod tests { ui: false, name: "".into(), custom_bootnodes: false, + fat_db: Default::default(), no_periodic_snapshot: false, })); } diff --git a/parity/dir.rs b/parity/dir.rs index 
158b5b2c5..5a87f8dac 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -110,6 +110,13 @@ impl DatabaseDirectories { dir.push("snapshot"); dir } + + /// Get the path for the network directory. + pub fn network_path(&self) -> PathBuf { + let mut dir = self.fork_path(); + dir.push("network"); + dir + } } #[cfg(test)] diff --git a/parity/helpers.rs b/parity/helpers.rs index 0649e7fe9..b8c4f3aa6 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -22,7 +22,7 @@ use std::fs::File; use util::{clean_0x, U256, Uint, Address, path, CompactionProfile}; use util::journaldb::Algorithm; use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig}; -use ethcore::miner::PendingSet; +use ethcore::miner::{PendingSet, GasLimit}; use cache::CacheConfig; use dir::DatabaseDirectories; use upgrade::upgrade; @@ -93,6 +93,14 @@ pub fn to_pending_set(s: &str) -> Result { } } +pub fn to_gas_limit(s: &str) -> Result { + match s { + "auto" => Ok(GasLimit::Auto), + "off" => Ok(GasLimit::None), + other => Ok(GasLimit::Fixed(try!(to_u256(other)))), + } +} + pub fn to_address(s: Option) -> Result { match s { Some(ref a) => clean_0x(a).parse().map_err(|_| format!("Invalid address: {:?}", a)), @@ -171,7 +179,7 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration { use ethsync::NetworkConfiguration; NetworkConfiguration { config_path: Some(replace_home("$HOME/.parity/network")), - net_config_path: Some(replace_home("$HOME/.parity/network/1")), + net_config_path: None, listen_address: Some("0.0.0.0:30303".into()), public_address: None, udp_port: None, @@ -191,6 +199,7 @@ pub fn to_client_config( cache_config: &CacheConfig, mode: Mode, tracing: bool, + fat_db: bool, compaction: DatabaseCompactionProfile, wal: bool, vm_type: VMType, @@ -217,6 +226,7 @@ pub fn to_client_config( client_config.mode = mode; client_config.tracing.enabled = tracing; + client_config.fat_db = fat_db; client_config.pruning = pruning; client_config.db_compaction = compaction; 
client_config.db_wal = wal; @@ -271,9 +281,10 @@ pub fn password_prompt() -> Result { pub fn password_from_file

(path: P) -> Result where P: AsRef { let mut file = try!(File::open(path).map_err(|_| "Unable to open password file.")); let mut file_content = String::new(); - try!(file.read_to_string(&mut file_content).map_err(|_| "Unable to read password file.")); - // remove eof - Ok((&file_content[..file_content.len() - 1]).to_owned()) + match file.read_to_string(&mut file_content) { + Ok(_) => Ok(file_content.trim().into()), + Err(_) => Err("Unable to read password file.".into()), + } } /// Reads passwords from files. Treats each line as a separate password. @@ -292,10 +303,13 @@ pub fn passwords_from_files(files: Vec) -> Result, String> { #[cfg(test)] mod tests { use std::time::Duration; + use std::fs::File; + use std::io::Write; + use devtools::RandomTempPath; use util::{U256}; use ethcore::client::{Mode, BlockID}; use ethcore::miner::PendingSet; - use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_addresses, to_price, geth_ipc_path, to_bootnodes}; + use super::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_address, to_addresses, to_price, geth_ipc_path, to_bootnodes, password_from_file}; #[test] fn test_to_duration() { @@ -378,6 +392,14 @@ mod tests { ); } + #[test] + fn test_password() { + let path = RandomTempPath::new(); + let mut file = File::create(path.as_path()).unwrap(); + file.write_all(b"a bc ").unwrap(); + assert_eq!(password_from_file(path).unwrap().as_bytes(), b"a bc"); + } + #[test] #[cfg_attr(feature = "dev", allow(float_cmp))] fn test_to_price() { diff --git a/parity/main.rs b/parity/main.rs index b74af7b3d..e0d6dfe36 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -196,6 +196,9 @@ fn sync_main() -> bool { } fn main() { + // Always print backtrace on panic. 
+ ::std::env::set_var("RUST_BACKTRACE", "1"); + if sync_main() { return; } diff --git a/parity/migration.rs b/parity/migration.rs index ac96d0864..26bb606bc 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -19,6 +19,7 @@ use std::fs::File; use std::io::{Read, Write, Error as IoError, ErrorKind}; use std::path::{Path, PathBuf}; use std::fmt::{Display, Formatter, Error as FmtError}; +use std::sync::Arc; use util::journaldb::Algorithm; use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration}; use util::kvdb::{CompactionProfile, Database, DatabaseConfig}; @@ -29,7 +30,7 @@ use ethcore::migrations::Extract; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. -const CURRENT_VERSION: u32 = 9; +const CURRENT_VERSION: u32 = 10; /// First version of the consolidated database. const CONSOLIDATION_VERSION: u32 = 9; /// Defines how many items are migrated to the new version of database at once. @@ -43,13 +44,15 @@ pub enum Error { /// Returned when current version cannot be read or guessed. UnknownDatabaseVersion, /// Migration does not support existing pruning algorithm. - UnsuportedPruningMethod, + UnsupportedPruningMethod, /// Existing DB is newer than the known one. FutureDBVersion, /// Migration is not possible. MigrationImpossible, /// Migration unexpectadly failed. MigrationFailed, + /// Internal migration error. + Internal(MigrationError), /// Migration was completed succesfully, /// but there was a problem with io. Io(IoError), @@ -59,10 +62,11 @@ impl Display for Error { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { let out = match *self { Error::UnknownDatabaseVersion => "Current database version cannot be read".into(), - Error::UnsuportedPruningMethod => "Unsupported pruning method for database migration. 
Delete DB and resync.".into(), + Error::UnsupportedPruningMethod => "Unsupported pruning method for database migration. Delete DB and resync.".into(), Error::FutureDBVersion => "Database was created with newer client version. Upgrade your client or delete DB and resync.".into(), Error::MigrationImpossible => format!("Database migration to version {} is not possible.", CURRENT_VERSION), Error::MigrationFailed => "Database migration unexpectedly failed".into(), + Error::Internal(ref err) => format!("{}", err), Error::Io(ref err) => format!("Unexpected io error on DB migration: {}.", err), }; @@ -80,7 +84,7 @@ impl From for Error { fn from(err: MigrationError) -> Self { match err { MigrationError::Io(e) => Error::Io(e), - _ => Error::MigrationFailed, + _ => Error::Internal(err), } } } @@ -140,7 +144,8 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> Mig /// Migrations on the consolidated database. fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result { - let manager = MigrationManager::new(default_migration_settings(compaction_profile)); + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); + try!(manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)); Ok(manager) } @@ -160,7 +165,7 @@ fn consolidate_database( let config = default_migration_settings(compaction_profile); let mut db_config = DatabaseConfig { max_open_files: 64, - cache_size: None, + cache_sizes: Default::default(), compaction: config.compaction_profile, columns: None, wal: true, @@ -169,13 +174,13 @@ fn consolidate_database( let old_path_str = try!(old_db_path.to_str().ok_or(Error::MigrationImpossible)); let new_path_str = try!(new_db_path.to_str().ok_or(Error::MigrationImpossible)); - let cur_db = try!(Database::open(&db_config, old_path_str).map_err(db_error)); + let cur_db = Arc::new(try!(Database::open(&db_config, old_path_str).map_err(db_error))); // open new DB with 
proper number of columns db_config.columns = migration.columns(); let mut new_db = try!(Database::open(&db_config, new_path_str).map_err(db_error)); // Migrate to new database (default column only) - try!(migration.migrate(&cur_db, &config, &mut new_db, None)); + try!(migration.migrate(cur_db, &config, &mut new_db, None)); Ok(()) } @@ -320,7 +325,7 @@ mod legacy { let res = match pruning { Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()), Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()), - _ => return Err(Error::UnsuportedPruningMethod), + _ => return Err(Error::UnsupportedPruningMethod), }; try!(res.map_err(|_| Error::MigrationImpossible)); diff --git a/parity/modules.rs b/parity/modules.rs index 53cef4741..39e05a293 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -68,8 +68,9 @@ pub type SyncModules = (Arc, Arc, Arc) #[cfg(feature="ipc")] mod ipc_deps { - pub use ethsync::{SyncClient, NetworkManagerClient, ServiceConfiguration}; - pub use ethcore::client::ChainNotifyClient; + pub use ethsync::remote::{SyncClient, NetworkManagerClient}; + pub use ethsync::ServiceConfiguration; + pub use ethcore::client::remote::ChainNotifyClient; pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL}; pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client}; pub use ipc::IpcSocket; diff --git a/parity/params.rs b/parity/params.rs index 71f702cfb..ee3038ebf 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -29,6 +29,7 @@ pub enum SpecType { Testnet, Olympic, Classic, + Expanse, Custom(String), } @@ -47,6 +48,7 @@ impl str::FromStr for SpecType { "frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic, "morden" | "testnet" => SpecType::Testnet, "olympic" => SpecType::Olympic, + "expanse" => SpecType::Expanse, other => SpecType::Custom(other.into()), }; Ok(spec) @@ -60,6 +62,7 @@ impl SpecType { SpecType::Testnet => 
Ok(ethereum::new_morden()), SpecType::Olympic => Ok(ethereum::new_olympic()), SpecType::Classic => Ok(ethereum::new_classic()), + SpecType::Expanse => Ok(ethereum::new_expanse()), SpecType::Custom(ref filename) => { let file = try!(fs::File::open(filename).map_err(|_| "Could not load specification file.")); Spec::load(file) @@ -139,7 +142,6 @@ impl str::FromStr for ResealPolicy { #[derive(Debug, PartialEq)] pub struct AccountsConfig { pub iterations: u32, - pub import_keys: bool, pub testnet: bool, pub password_files: Vec, pub unlocked_accounts: Vec

, @@ -149,7 +151,6 @@ impl Default for AccountsConfig { fn default() -> Self { AccountsConfig { iterations: 10240, - import_keys: false, testnet: false, password_files: Vec::new(), unlocked_accounts: Vec::new(), @@ -205,7 +206,7 @@ impl Default for MinerExtras { extra_data: version_data(), gas_floor_target: U256::from(4_700_000), gas_ceil_target: U256::from(6_283_184), - transactions_limit: 1024, + transactions_limit: 2048, } } } @@ -249,6 +250,20 @@ pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> R } } +pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, algorithm: Algorithm) -> Result { + let result = match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { + (false, Switch::On, false) => Err("FatDB resync required".into()), + (_, Switch::On, _) => Ok(true), + (_, Switch::Off, _) => Ok(false), + (_, Switch::Auto, def) => Ok(def), + }; + + if result.clone().unwrap_or(false) && algorithm != Algorithm::Archive { + return Err("Fat DB is not supported with the chosen pruning option. Please rerun with `--pruning=archive`".into()); + } + result +} + #[cfg(test)] mod tests { use util::journaldb::Algorithm; diff --git a/parity/run.rs b/parity/run.rs index e95b5c9f5..254765983 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -15,7 +15,6 @@ // along with Parity. If not, see . 
use std::sync::{Arc, Mutex, Condvar}; -use std::io::ErrorKind; use ctrlc::CtrlC; use fdlimit::raise_fd_limit; use ethcore_logger::{Config as LogConfig, setup_log}; @@ -28,14 +27,17 @@ use ethcore::service::ClientService; use ethcore::account_provider::AccountProvider; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; use ethcore::snapshot; -use ethsync::{SyncConfig, SyncProvider}; +use ethsync::SyncConfig; use informant::Informant; use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; use signer::SignerServer; use dapps::WebappServer; use io_handler::ClientIoHandler; -use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool}; +use params::{ + SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, + tracing_switch_to_bool, fatdb_switch_to_bool, +}; use helpers::{to_client_config, execute_upgrades, passwords_from_files}; use dir::Directories; use cache::CacheConfig; @@ -72,6 +74,7 @@ pub struct RunCmd { pub miner_extras: MinerExtras, pub mode: Mode, pub tracing: Switch, + pub fat_db: Switch, pub compaction: DatabaseCompactionProfile, pub wal: bool, pub vm_type: VMType, @@ -115,11 +118,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // load user defaults let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if tracing is on let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); - // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); // prepare client and snapshot paths. 
let client_path = db_dirs.client_path(algorithm); @@ -135,7 +141,17 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // display info about used pruning algorithm info!("Starting {}", Colour::White.bold().paint(version())); - info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str())); + info!("State DB configuation: {}{}{}", + Colour::White.bold().paint(algorithm.as_str()), + match fat_db { + true => Colour::White.bold().paint(" +Fat").to_string(), + false => "".to_owned(), + }, + match tracing { + true => Colour::White.bold().paint(" +Trace").to_string(), + false => "".to_owned(), + } + ); // display warning about using experimental journaldb alorithm if !algorithm.is_stable() { @@ -148,6 +164,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { Some(id) => id, None => spec.network_id(), }; + if spec.subprotocol_name().len() != 3 { + warn!("Your chain specification's subprotocol length is not 3. Ignoring."); + } else { + sync_config.subprotocol_name.clone_from_slice(spec.subprotocol_name().as_bytes()); + } sync_config.fork_block = spec.fork_block(); // prepare account provider @@ -166,6 +187,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { &cmd.cache_config, cmd.mode, tracing, + fat_db, cmd.compaction, cmd.wal, cmd.vm_type, @@ -179,6 +201,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { net_conf.boot_nodes = spec.nodes.clone(); } + // set network path. 
+ net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); + // create supervisor let mut hypervisor = modules::hypervisor(&cmd.dirs.ipc_path()); @@ -218,7 +243,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { let signer_path = cmd.signer_conf.signer_path.clone(); let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { signer_port: cmd.signer_port, - signer_service: Arc::new(rpc_apis::SignerService::new(move || signer::new_token(signer_path.clone()))), + signer_service: Arc::new(rpc_apis::SignerService::new(move || { + signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) + })), client: client.clone(), sync: sync_provider.clone(), net: manage_network.clone(), @@ -278,7 +305,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { let sync = sync_provider.clone(); let watcher = Arc::new(snapshot::Watcher::new( service.client(), - move || sync.status().is_major_syncing(), + move || ::ethsync::SyncProvider::status(&*sync).is_major_syncing(), service.io().channel(), SNAPSHOT_PERIOD, SNAPSHOT_HISTORY, @@ -335,28 +362,11 @@ fn daemonize(_pid_file: String) -> Result<(), String> { } fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result { - use ethcore::ethstore::{import_accounts, EthStore}; - use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory}; - use ethcore::ethstore::Error; + use ethcore::ethstore::EthStore; + use ethcore::ethstore::dir::DiskDirectory; let passwords = try!(passwords_from_files(cfg.password_files)); - if cfg.import_keys { - let t = if cfg.testnet { - DirectoryType::Testnet - } else { - DirectoryType::Main - }; - - let from = GethDirectory::open(t); - let to = try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e))); - match import_accounts(&from, &to) { - Ok(_) => {} - Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => {} - Err(err) => warn!("Import geth accounts failed. 
{}", err) - } - } - let dir = Box::new(try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e)))); let account_service = AccountProvider::new(Box::new( try!(EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e))) diff --git a/parity/signer.rs b/parity/signer.rs index b60bc7211..869c7fab5 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -74,7 +74,7 @@ pub fn new_token(path: String) -> Result { .map_err(|err| format!("Error generating token: {:?}", err)) } -fn generate_new_token(path: String) -> io::Result { +pub fn generate_new_token(path: String) -> io::Result { let path = codes_path(path); let mut codes = try!(signer::AuthCodes::from_file(&path)); let code = try!(codes.generate_new()); diff --git a/parity/snapshot.rs b/parity/snapshot.rs index f3a8a45d3..6b2efeed5 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -30,7 +30,7 @@ use ethcore::miner::Miner; use ethcore::ids::BlockID; use cache::CacheConfig; -use params::{SpecType, Pruning, Switch, tracing_switch_to_bool}; +use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; use user_defaults::UserDefaults; @@ -57,6 +57,7 @@ pub struct SnapshotCommand { pub logger_config: LogConfig, pub mode: Mode, pub tracing: Switch, + pub fat_db: Switch, pub compaction: DatabaseCompactionProfile, pub file_path: Option, pub wal: bool, @@ -139,9 +140,6 @@ impl SnapshotCommand { // load user defaults let user_defaults = try!(UserDefaults::load(&user_defaults_path)); - // check if tracing is on - let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults)); - // Setup logging let _logger = setup_log(&self.logger_config); @@ -150,6 +148,12 @@ impl SnapshotCommand { // select pruning algorithm let algorithm = self.pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = 
try!(tracing_switch_to_bool(self.tracing, &user_defaults)); + + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)); + // prepare client and snapshot paths. let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); @@ -158,7 +162,7 @@ impl SnapshotCommand { try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm); + let client_config = to_client_config(&self.cache_config, self.mode, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, diff --git a/parity/sync.rs b/parity/sync.rs index 85f771546..25f900b78 100644 --- a/parity/sync.rs +++ b/parity/sync.rs @@ -19,8 +19,9 @@ use std::sync::Arc; use std::sync::atomic::AtomicBool; use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL, ControlService}; -use ethcore::client::{RemoteClient, ChainNotify}; -use ethcore::snapshot::{RemoteSnapshotService}; +use ethcore::client::ChainNotify; +use ethcore::client::remote::RemoteClient; +use ethcore::snapshot::remote::RemoteSnapshotService; use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; use modules::service_urls; use boot; diff --git a/parity/user_defaults.rs b/parity/user_defaults.rs index 8a1feebae..b7fc3d929 100644 --- a/parity/user_defaults.rs +++ b/parity/user_defaults.rs @@ -30,6 +30,7 @@ pub struct UserDefaults { pub is_first_launch: bool, pub pruning: Algorithm, pub tracing: bool, + pub fat_db: bool, } impl Serialize for UserDefaults { @@ -38,6 +39,7 @@ impl Serialize for UserDefaults { let mut map: BTreeMap = BTreeMap::new(); map.insert("pruning".into(), Value::String(self.pruning.as_str().into())); map.insert("tracing".into(), Value::Bool(self.tracing)); + 
map.insert("fat_db".into(), Value::Bool(self.fat_db)); map.serialize(serializer) } } @@ -62,11 +64,14 @@ impl Visitor for UserDefaultsVisitor { let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method"))); let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing"))); let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value"))); + let fat_db: Value = map.remove("fat_db".into()).unwrap_or_else(|| Value::Bool(false)); + let fat_db = try!(fat_db.as_bool().ok_or_else(|| Error::custom("invalid fat_db value"))); let user_defaults = UserDefaults { is_first_launch: false, pruning: pruning, tracing: tracing, + fat_db: fat_db, }; Ok(user_defaults) @@ -79,6 +84,7 @@ impl Default for UserDefaults { is_first_launch: true, pruning: Algorithm::default(), tracing: false, + fat_db: false, } } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index c3f9cddbd..34b68fb81 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -25,6 +25,7 @@ ethsync = { path = "../sync" } ethjson = { path = "../json" } ethcore-devtools = { path = "../devtools" } rlp = { path = "../util/rlp" } +fetch = { path = "../util/fetch" } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.8.0", optional = true } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 7f2f11400..01ba44941 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -36,6 +36,7 @@ extern crate json_ipc_server as ipc; extern crate ethcore_ipc; extern crate time; extern crate rlp; +extern crate fetch; #[macro_use] extern crate log; diff --git a/rpc/src/v1/helpers/auto_args.rs b/rpc/src/v1/helpers/auto_args.rs index c7deb0436..ce1e6854a 100644 --- a/rpc/src/v1/helpers/auto_args.rs +++ b/rpc/src/v1/helpers/auto_args.rs @@ -31,16 +31,32 @@ use serde::{Serialize, Deserialize}; /// function `to_delegate` which will automatically wrap each strongly-typed /// function in a wrapper which handles parameter and output type 
serialization. /// -/// Every function must have a `#[name("rpc_nameHere")]` attribute after -/// its documentation, and no other attributes. All function names are -/// allowed except for `to_delegate`, which is auto-generated. +/// RPC functions may come in a couple forms: async and synchronous. +/// These are parsed with the custom `#[rpc]` attribute, which must follow +/// documentation. +/// +/// ## The #[rpc] attribute +/// +/// Valid forms: +/// - `#[rpc(name = "name_here")]` (a synchronous rpc function which should be bound to the given name) +/// - `#[rpc(async, name = "name_here")]` (an async rpc function which should be bound to the given name) +/// +/// Synchronous function format: +/// `fn foo(&self, Param1, Param2, Param3) -> Out`. +/// +/// Asynchronous RPC functions must come in this form: +/// `fn foo(&self, Param1, Param2, Param3, Ready); +/// +/// Anything else will be rejected by the code generator. macro_rules! build_rpc_trait { + // entry-point. todo: make another for traits w/ bounds. ( $(#[$t_attr: meta])* pub trait $name: ident { $( - $(#[doc=$m_doc: expr])* #[name($rpc_name: expr)] - fn $method: ident (&self $(, $param: ty)*) -> $out: ty; + $( #[doc=$m_doc:expr] )* + #[ rpc( $($t:tt)* ) ] + fn $m_name: ident ( $($p: tt)* ) $( -> Result<$out: ty, Error> )* ; )* } ) => { @@ -48,7 +64,7 @@ macro_rules! build_rpc_trait { pub trait $name: Sized + Send + Sync + 'static { $( $(#[doc=$m_doc])* - fn $method(&self $(, $param)*) -> $out; + fn $m_name ( $($p)* ) $( -> Result<$out, Error> )* ; )* /// Transform this into an `IoDelegate`, automatically wrapping @@ -56,14 +72,33 @@ macro_rules! 
build_rpc_trait { fn to_delegate(self) -> ::jsonrpc_core::IoDelegate { let mut del = ::jsonrpc_core::IoDelegate::new(self.into()); $( - del.add_method($rpc_name, move |base, params| { - ($name::$method as fn(&_ $(, $param)*) -> $out).wrap_rpc(base, params) - }); + build_rpc_trait!(WRAP del => + ( $($t)* ) + fn $m_name ( $($p)* ) $( -> Result<$out, Error> )* + ); )* del } } - } + }; + + ( WRAP $del: expr => + (name = $name: expr) + fn $method: ident (&self $(, $param: ty)*) -> Result<$out: ty, Error> + ) => { + $del.add_method($name, move |base, params| { + (Self::$method as fn(&_ $(, $param)*) -> Result<$out, Error>).wrap_rpc(base, params) + }) + }; + + ( WRAP $del: expr => + (async, name = $name: expr) + fn $method: ident (&self, Ready<$out: ty> $(, $param: ty)*) + ) => { + $del.add_async_method($name, move |base, params, ready| { + (Self::$method as fn(&_, Ready<$out> $(, $param)*)).wrap_rpc(base, params, ready) + }) + }; } /// A wrapper type without an implementation of `Deserialize` @@ -71,11 +106,35 @@ macro_rules! build_rpc_trait { /// that take a trailing default parameter. pub struct Trailing(pub T); +/// A wrapper type for `jsonrpc_core`'s weakly-typed `Ready` struct. +pub struct Ready { + inner: ::jsonrpc_core::Ready, + _marker: ::std::marker::PhantomData, +} + +impl From<::jsonrpc_core::Ready> for Ready { + fn from(ready: ::jsonrpc_core::Ready) -> Self { + Ready { inner: ready, _marker: ::std::marker::PhantomData } + } +} + +impl Ready { + /// Respond withthe asynchronous result. + pub fn ready(self, result: Result) { + self.inner.ready(result.map(to_value)) + } +} + /// Wrapper trait for synchronous RPC functions. pub trait Wrap { fn wrap_rpc(&self, base: &B, params: Params) -> Result; } +/// Wrapper trait for asynchronous RPC functions. +pub trait WrapAsync { + fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready); +} + // special impl for no parameters. 
impl Wrap for fn(&B) -> Result where B: Send + Sync + 'static, OUT: Serialize @@ -87,10 +146,23 @@ impl Wrap for fn(&B) -> Result } } +impl WrapAsync for fn(&B, Ready) + where B: Send + Sync + 'static, OUT: Serialize +{ + fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready) { + match ::v1::helpers::params::expect_no_params(params) { + Ok(()) => (self)(base, ready.into()), + Err(e) => ready.ready(Err(e)), + } + } +} + // creates a wrapper implementation which deserializes the parameters, // calls the function with concrete type, and serializes the output. macro_rules! wrap { ($($x: ident),+) => { + + // synchronous implementation impl < BASE: Send + Sync + 'static, OUT: Serialize, @@ -102,6 +174,20 @@ macro_rules! wrap { }).map(to_value) } } + + // asynchronous implementation + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + > WrapAsync for fn(&BASE, Ready, $($x,)+ ) { + fn wrap_rpc(&self, base: &BASE, params: Params, ready: ::jsonrpc_core::Ready) { + match from_params::<($($x,)+)>(params) { + Ok(($($x,)+)) => (self)(base, ready.into(), $($x,)+), + Err(e) => ready.ready(Err(e)), + } + } + } } } @@ -126,10 +212,34 @@ impl Wrap for fn(&B, Trailing) -> Result } } +impl WrapAsync for fn(&B, Ready, Trailing) + where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize +{ + fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready) { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return ready.ready(Err(errors::invalid_params("not an array", ""))), + }; + + let id = match len { + 0 => Ok((T::default(),)), + 1 => from_params::<(T,)>(params), + _ => Err(Error::invalid_params()), + }; + + match id { + Ok((id,)) => (self)(base, ready.into(), Trailing(id)), + Err(e) => ready.ready(Err(e)), + } + } +} + // similar to `wrap!`, but handles a single default trailing parameter // accepts an additional argument indicating the number of non-trailing 
parameters. macro_rules! wrap_with_trailing { ($num: expr, $($x: ident),+) => { + // synchronous implementation impl < BASE: Send + Sync + 'static, OUT: Serialize, @@ -155,6 +265,35 @@ macro_rules! wrap_with_trailing { (self)(base, $($x,)+ Trailing(id)).map(to_value) } } + + // asynchronous implementation + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + TRAILING: Default + Deserialize, + > WrapAsync for fn(&BASE, Ready, $($x,)+ Trailing) { + fn wrap_rpc(&self, base: &BASE, params: Params, ready: ::jsonrpc_core::Ready) { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return ready.ready(Err(errors::invalid_params("not an array", ""))), + }; + + let params = match len - $num { + 0 => from_params::<($($x,)+)>(params) + .map(|($($x,)+)| ($($x,)+ TRAILING::default())), + 1 => from_params::<($($x,)+ TRAILING)>(params) + .map(|($($x,)+ id)| ($($x,)+ id)), + _ => Err(Error::invalid_params()), + }; + + match params { + Ok(($($x,)+ id)) => (self)(base, ready.into(), $($x,)+ Trailing(id)), + Err(e) => ready.ready(Err(e)) + } + } + } } } diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 18e369208..0d7902897 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -21,8 +21,9 @@ macro_rules! 
rpc_unimplemented { } use std::fmt; -use ethcore::error::Error as EthcoreError; +use ethcore::error::{Error as EthcoreError, CallError}; use ethcore::account_provider::{Error as AccountError}; +use fetch::FetchError; use jsonrpc_core::{Error, ErrorCode, Value}; mod codes { @@ -33,6 +34,7 @@ mod codes { pub const NO_NEW_WORK: i64 = -32003; pub const UNKNOWN_ERROR: i64 = -32009; pub const TRANSACTION_ERROR: i64 = -32010; + pub const EXECUTION_ERROR: i64 = -32015; pub const ACCOUNT_LOCKED: i64 = -32020; pub const PASSWORD_INVALID: i64 = -32021; pub const ACCOUNT_ERROR: i64 = -32023; @@ -41,6 +43,7 @@ mod codes { pub const REQUEST_REJECTED_LIMIT: i64 = -32041; pub const REQUEST_NOT_FOUND: i64 = -32042; pub const COMPILATION_ERROR: i64 = -32050; + pub const FETCH_ERROR: i64 = -32060; } pub fn unimplemented() -> Error { @@ -107,6 +110,14 @@ pub fn invalid_params(param: &str, details: T) -> Error { } } +pub fn execution(data: T) -> Error { + Error { + code: ErrorCode::ServerError(codes::EXECUTION_ERROR), + message: "Transaction execution error.".into(), + data: Some(Value::String(format!("{:?}", data))), + } +} + pub fn state_pruned() -> Error { Error { code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), @@ -155,6 +166,14 @@ pub fn signer_disabled() -> Error { } } +pub fn from_fetch_error(error: FetchError) -> Error { + Error { + code: ErrorCode::ServerError(codes::FETCH_ERROR), + message: "Error while fetching content.".into(), + data: Some(Value::String(format!("{:?}", error))), + } +} + pub fn from_signing_error(error: AccountError) -> Error { Error { code: ErrorCode::ServerError(codes::ACCOUNT_LOCKED), @@ -179,13 +198,13 @@ pub fn from_transaction_error(error: EthcoreError) -> Error { AlreadyImported => "Transaction with the same hash was already imported.".into(), Old => "Transaction nonce is too low. Try incrementing the nonce.".into(), TooCheapToReplace => { - "Transaction fee is too low. There is another transaction with same nonce in the queue. 
Try increasing the fee or incrementing the nonce.".into() + "Transaction gas price is too low. There is another transaction with same nonce in the queue. Try increasing the gas price or incrementing the nonce.".into() }, LimitReached => { "There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee.".into() }, InsufficientGasPrice { minimal, got } => { - format!("Transaction fee is too low. It does not satisfy your node's minimal fee (minimal: {}, got: {}). Try increasing the fee.", minimal, got) + format!("Transaction gas price is too low. It does not satisfy your node's minimal gas price (minimal: {}, got: {}). Try increasing the gas price.", minimal, got) }, InsufficientBalance { balance, cost } => { format!("Insufficient funds. Account you try to send transaction from does not have enough funds. Required {} and got: {}.", cost, balance) @@ -209,4 +228,10 @@ pub fn from_transaction_error(error: EthcoreError) -> Error { } } - +pub fn from_call_error(error: CallError) -> Error { + match error { + CallError::StatePruned => state_pruned(), + CallError::Execution(e) => execution(e), + CallError::TransactionNotFound => internal("{}, this should not be the case with eth_call, most likely a bug.", CallError::TransactionNotFound), + } +} diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 755539ebd..c13229222 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -33,7 +33,7 @@ use util::{FromHex, Mutex}; use rlp::{self, UntrustedRlp, View}; use ethcore::account_provider::AccountProvider; use ethcore::client::{MiningBlockChainClient, BlockID, TransactionID, UncleID}; -use ethcore::header::Header as BlockHeader; +use ethcore::header::{Header as BlockHeader, BlockNumber as EthBlockNumber}; use ethcore::block::IsBlock; use ethcore::views::*; use ethcore::ethereum::Ethash; @@ -198,8 +198,8 @@ impl EthClient where } } -pub fn pending_logs(miner: &M, filter: &EthcoreFilter) -> Vec where M: 
MinerService { - let receipts = miner.pending_receipts(); +pub fn pending_logs(miner: &M, best_block: EthBlockNumber, filter: &EthcoreFilter) -> Vec where M: MinerService { + let receipts = miner.pending_receipts(best_block); let pending_logs = receipts.into_iter() .flat_map(|(hash, r)| r.logs.into_iter().map(|l| (hash.clone(), l)).collect::>()) @@ -424,13 +424,10 @@ impl Eth for EthClient where fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - - let miner = take_weak!(self.miner); let hash: H256 = hash.into(); - match miner.transaction(&hash) { - Some(pending_tx) => Ok(Some(pending_tx.into())), - None => self.transaction(TransactionID::Hash(hash)) - } + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + Ok(try!(self.transaction(TransactionID::Hash(hash))).or_else(|| miner.transaction(client.chain_info().best_block_number, &hash).map(Into::into))) } fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { @@ -449,8 +446,9 @@ impl Eth for EthClient where try!(self.active()); let miner = take_weak!(self.miner); + let best_block = take_weak!(self.client).chain_info().best_block_number; let hash: H256 = hash.into(); - match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) { + match (miner.pending_receipt(best_block, &hash), self.options.allow_pending_receipt_query) { (Some(receipt), true) => Ok(Some(receipt.into())), _ => { let client = take_weak!(self.client); @@ -492,7 +490,8 @@ impl Eth for EthClient where .collect::>(); if include_pending { - let pending = pending_logs(&*take_weak!(self.miner), &filter); + let best_block = take_weak!(self.client).chain_info().best_block_number; + let pending = pending_logs(&*take_weak!(self.miner), best_block, &filter); logs.extend(pending); } @@ -594,7 +593,10 @@ impl Eth for EthClient where num => take_weak!(self.client).call(&signed, num.into(), Default::default()), }; - Ok(r.map(|e| 
Bytes(e.output)).unwrap_or(Bytes::new(vec![]))) + match r { + Ok(b) => Ok(Bytes(b.output)), + Err(e) => Err(errors::from_call_error(e)), + } } fn estimate_gas(&self, request: CallRequest, num: Trailing) -> Result { diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index 03d9d7215..dd1c937ac 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -81,7 +81,8 @@ impl EthFilter for EthFilterClient try!(self.active()); let mut polls = self.polls.lock(); - let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); + let best_block = take_weak!(self.client).chain_info().best_block_number; + let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(best_block); let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); Ok(id.into()) } @@ -108,7 +109,8 @@ impl EthFilter for EthFilterClient }, PollFilter::PendingTransaction(ref mut previous_hashes) => { // get hashes of pending transactions - let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); + let best_block = take_weak!(self.client).chain_info().best_block_number; + let current_hashes = take_weak!(self.miner).pending_transactions_hashes(best_block); let new_hashes = { @@ -149,7 +151,8 @@ impl EthFilter for EthFilterClient // additionally retrieve pending logs if include_pending { - let pending_logs = pending_logs(&*take_weak!(self.miner), &filter); + let best_block = take_weak!(self.client).chain_info().best_block_number; + let pending_logs = pending_logs(&*take_weak!(self.miner), best_block, &filter); // remove logs about which client was already notified about let new_pending_logs: Vec<_> = pending_logs.iter() @@ -190,7 +193,8 @@ impl EthFilter for EthFilterClient .collect::>(); if include_pending { - logs.extend(pending_logs(&*take_weak!(self.miner), &filter)); + let best_block = take_weak!(self.client).chain_info().best_block_number; + 
logs.extend(pending_logs(&*take_weak!(self.miner), best_block, &filter)); } let logs = limit_logs(logs, filter.limit); diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs index 220ead3dd..a63d33f52 100644 --- a/rpc/src/v1/impls/ethcore.rs +++ b/rpc/src/v1/impls/ethcore.rs @@ -15,30 +15,34 @@ // along with Parity. If not, see . //! Ethcore-specific rpc implementation. -use std::sync::{Arc, Weak}; +use std::{fs, io}; +use std::sync::{mpsc, Arc, Weak}; use std::str::FromStr; -use std::collections::{BTreeMap}; -use util::{RotatingLogger, Address}; + +use util::{RotatingLogger, Address, Mutex, sha3}; use util::misc::version_data; use crypto::ecies; +use fetch::{Client as FetchClient, Fetch}; use ethkey::{Brain, Generator}; use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; use ethcore::miner::MinerService; use ethcore::client::{MiningBlockChainClient}; +use ethcore::ids::BlockID; -use jsonrpc_core::*; +use jsonrpc_core::Error; use v1::traits::Ethcore; -use v1::types::{Bytes, U256, H160, H512, Peers, Transaction}; +use v1::types::{Bytes, U256, H160, H256, H512, Peers, Transaction, RpcSettings}; use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; -use v1::helpers::params::expect_no_params; +use v1::helpers::auto_args::Ready; /// Ethcore implementation. -pub struct EthcoreClient where +pub struct EthcoreClient where C: MiningBlockChainClient, M: MinerService, - S: SyncProvider { + S: SyncProvider, + F: Fetch { client: Weak, miner: Weak, @@ -47,10 +51,14 @@ pub struct EthcoreClient where logger: Arc, settings: Arc, signer: Option>, + fetch: Mutex } -impl EthcoreClient where C: MiningBlockChainClient, M: MinerService, S: SyncProvider { - /// Creates new `EthcoreClient`. +impl EthcoreClient where + C: MiningBlockChainClient, + M: MinerService, + S: SyncProvider, { + /// Creates new `EthcoreClient` with default `Fetch`. 
pub fn new( client: &Arc, miner: &Arc, @@ -60,6 +68,26 @@ impl EthcoreClient where C: MiningBlockChainClient, M: settings: Arc, signer: Option> ) -> Self { + Self::with_fetch(client, miner, sync, net, logger, settings, signer) + } +} + +impl EthcoreClient where + C: MiningBlockChainClient, + M: MinerService, + S: SyncProvider, + F: Fetch, { + + /// Creates new `EthcoreClient` with customizable `Fetch`. + pub fn with_fetch( + client: &Arc, + miner: &Arc, + sync: &Arc, + net: &Arc, + logger: Arc, + settings: Arc, + signer: Option> + ) -> Self { EthcoreClient { client: Arc::downgrade(client), miner: Arc::downgrade(miner), @@ -68,6 +96,7 @@ impl EthcoreClient where C: MiningBlockChainClient, M: logger: logger, settings: settings, signer: signer, + fetch: Mutex::new(F::default()), } } @@ -78,159 +107,207 @@ impl EthcoreClient where C: MiningBlockChainClient, M: } } -impl Ethcore for EthcoreClient where M: MinerService + 'static, C: MiningBlockChainClient + 'static, S: SyncProvider + 'static { +impl Ethcore for EthcoreClient where + M: MinerService + 'static, + C: MiningBlockChainClient + 'static, + S: SyncProvider + 'static, + F: Fetch + 'static { - fn transactions_limit(&self, params: Params) -> Result { + fn transactions_limit(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&take_weak!(self.miner).transactions_limit())) + + Ok(take_weak!(self.miner).transactions_limit()) } - fn min_gas_price(&self, params: Params) -> Result { + fn min_gas_price(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&U256::from(take_weak!(self.miner).minimal_gas_price()))) + + Ok(U256::from(take_weak!(self.miner).minimal_gas_price())) } - fn extra_data(&self, params: Params) -> Result { + fn extra_data(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&Bytes::new(take_weak!(self.miner).extra_data()))) + + Ok(Bytes::new(take_weak!(self.miner).extra_data())) } - fn 
gas_floor_target(&self, params: Params) -> Result { + fn gas_floor_target(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&U256::from(take_weak!(self.miner).gas_floor_target()))) + + Ok(U256::from(take_weak!(self.miner).gas_floor_target())) } - fn gas_ceil_target(&self, params: Params) -> Result { + fn gas_ceil_target(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&U256::from(take_weak!(self.miner).gas_ceil_target()))) + + Ok(U256::from(take_weak!(self.miner).gas_ceil_target())) } - fn dev_logs(&self, params: Params) -> Result { + fn dev_logs(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); + let logs = self.logger.logs(); - Ok(to_value(&logs.as_slice())) + Ok(logs.as_slice().to_owned()) } - fn dev_logs_levels(&self, params: Params) -> Result { + fn dev_logs_levels(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.logger.levels())) + + Ok(self.logger.levels().to_owned()) } - fn net_chain(&self, params: Params) -> Result { + fn net_chain(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.settings.chain)) + + Ok(self.settings.chain.clone()) } - fn net_peers(&self, params: Params) -> Result { + fn net_peers(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let sync_status = take_weak!(self.sync).status(); let net_config = take_weak!(self.net).network_config(); - Ok(to_value(&Peers { + Ok(Peers { active: sync_status.num_active_peers, connected: sync_status.num_peers, max: sync_status.current_max_peers(net_config.min_peers, net_config.max_peers), - })) + }) } - fn net_port(&self, params: Params) -> Result { + fn net_port(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.settings.network_port)) + + Ok(self.settings.network_port) } - fn node_name(&self, params: Params) -> Result { + fn node_name(&self) -> 
Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.settings.name)) + + Ok(self.settings.name.clone()) } - fn registry_address(&self, params: Params) -> Result { + fn registry_address(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); - let r = take_weak!(self.client) - .additional_params() - .get("registrar") - .and_then(|s| Address::from_str(s).ok()) - .map(|s| H160::from(s)); - Ok(to_value(&r)) + + Ok( + take_weak!(self.client) + .additional_params() + .get("registrar") + .and_then(|s| Address::from_str(s).ok()) + .map(|s| H160::from(s)) + ) } - fn rpc_settings(&self, params: Params) -> Result { + fn rpc_settings(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - let mut map = BTreeMap::new(); - map.insert("enabled".to_owned(), Value::Bool(self.settings.rpc_enabled)); - map.insert("interface".to_owned(), Value::String(self.settings.rpc_interface.clone())); - map.insert("port".to_owned(), Value::U64(self.settings.rpc_port as u64)); - Ok(Value::Object(map)) + Ok(RpcSettings { + enabled: self.settings.rpc_enabled, + interface: self.settings.rpc_interface.clone(), + port: self.settings.rpc_port as u64, + }) } - fn default_extra_data(&self, params: Params) -> Result { + fn default_extra_data(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&Bytes::new(version_data()))) + + Ok(Bytes::new(version_data())) } - fn gas_price_statistics(&self, params: Params) -> Result { + fn gas_price_statistics(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); match take_weak!(self.client).gas_price_statistics(100, 8) { - Ok(stats) => Ok(to_value(&stats - .into_iter() - .map(|x| to_value(&U256::from(x))) - .collect::>())), + Ok(stats) => Ok(stats.into_iter().map(Into::into).collect()), _ => Err(Error::internal_error()), } } - fn unsigned_transactions_count(&self, params: Params) -> Result { + fn unsigned_transactions_count(&self) 
-> Result { try!(self.active()); - try!(expect_no_params(params)); match self.signer { None => Err(errors::signer_disabled()), - Some(ref signer) => Ok(to_value(&signer.len())), + Some(ref signer) => Ok(signer.len()), } } - fn generate_secret_phrase(&self, params: Params) -> Result { + fn generate_secret_phrase(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&random_phrase(12))) + Ok(random_phrase(12)) } - fn phrase_to_address(&self, params: Params) -> Result { + fn phrase_to_address(&self, phrase: String) -> Result { try!(self.active()); - from_params::<(String,)>(params).map(|(phrase,)| - to_value(&H160::from(Brain::new(phrase).generate().unwrap().address())) - ) + + Ok(Brain::new(phrase).generate().unwrap().address().into()) } - fn encrypt_message(&self, params: Params) -> Result { + fn list_accounts(&self) -> Result>, Error> { try!(self.active()); - from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| { - let s = try!(ecies::encrypt(&key.into(), &[0; 0], &phrase.0).map_err(|_| Error::internal_error())); - Ok(to_value(&Bytes::from(s))) - }) + + Ok(take_weak!(self.client) + .list_accounts(BlockID::Latest) + .map(|a| a.into_iter().map(Into::into).collect())) } - fn pending_transactions(&self, params: Params) -> Result { + fn list_storage_keys(&self, _address: H160) -> Result>, Error> { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::>())) + // TODO: implement this + Ok(None) + } + + fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result { + try!(self.active()); + + ecies::encrypt(&key.into(), &[0; 0], &phrase.0) + .map_err(|_| Error::internal_error()) + .map(Into::into) + } + + fn pending_transactions(&self) -> Result, Error> { + try!(self.active()); + + Ok(take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::>()) + } + + fn hash_content(&self, ready: Ready, url: String) { + let res = 
self.active(); + + let hash_content = |result| { + let path = try!(result); + let mut file = io::BufReader::new(try!(fs::File::open(&path))); + // Try to hash + let result = sha3(&mut file); + // Remove file (always) + try!(fs::remove_file(&path)); + // Return the result + Ok(try!(result)) + }; + + match res { + Err(e) => ready.ready(Err(e)), + Ok(()) => { + let (tx, rx) = mpsc::channel(); + let res = self.fetch.lock().request_async(&url, Default::default(), Box::new(move |result| { + let result = hash_content(result) + .map_err(errors::from_fetch_error) + .map(Into::into); + + // Receive ready and invoke with result. + let ready: Ready = rx.try_recv().expect("When on_done is invoked ready object is always sent."); + ready.ready(result); + })); + + // Either invoke ready right away or transfer it to the closure. + if let Err(e) = res { + ready.ready(Err(errors::from_fetch_error(e))); + } else { + tx.send(ready).expect("Rx end is sent to on_done closure."); + } + } + } } } diff --git a/rpc/src/v1/impls/ethcore_set.rs b/rpc/src/v1/impls/ethcore_set.rs index 35b97f785..7bebf9bbb 100644 --- a/rpc/src/v1/impls/ethcore_set.rs +++ b/rpc/src/v1/impls/ethcore_set.rs @@ -21,7 +21,6 @@ use ethcore::miner::MinerService; use ethcore::client::MiningBlockChainClient; use ethsync::ManageNetwork; use v1::helpers::errors; -use v1::helpers::params::expect_no_params; use v1::traits::EthcoreSet; use v1::types::{Bytes, H160, U256}; @@ -58,105 +57,94 @@ impl EthcoreSet for EthcoreSetClient where C: MiningBlockChainClient + 'static, M: MinerService + 'static { - fn set_min_gas_price(&self, params: Params) -> Result { + fn set_min_gas_price(&self, gas_price: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(gas_price,)| { - take_weak!(self.miner).set_minimal_gas_price(gas_price.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_minimal_gas_price(gas_price.into()); + Ok(true) } - fn set_gas_floor_target(&self, params: Params) -> Result { + fn 
set_gas_floor_target(&self, target: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(target,)| { - take_weak!(self.miner).set_gas_floor_target(target.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_gas_floor_target(target.into()); + Ok(true) } - fn set_gas_ceil_target(&self, params: Params) -> Result { + fn set_gas_ceil_target(&self, target: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(target,)| { - take_weak!(self.miner).set_gas_ceil_target(target.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_gas_ceil_target(target.into()); + Ok(true) } - fn set_extra_data(&self, params: Params) -> Result { + fn set_extra_data(&self, extra_data: Bytes) -> Result { try!(self.active()); - from_params::<(Bytes,)>(params).and_then(|(extra_data,)| { - take_weak!(self.miner).set_extra_data(extra_data.to_vec()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_extra_data(extra_data.to_vec()); + Ok(true) } - fn set_author(&self, params: Params) -> Result { + fn set_author(&self, author: H160) -> Result { try!(self.active()); - from_params::<(H160,)>(params).and_then(|(author,)| { - take_weak!(self.miner).set_author(author.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_author(author.into()); + Ok(true) } - fn set_transactions_limit(&self, params: Params) -> Result { + fn set_transactions_limit(&self, limit: usize) -> Result { try!(self.active()); - from_params::<(usize,)>(params).and_then(|(limit,)| { - take_weak!(self.miner).set_transactions_limit(limit); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_transactions_limit(limit); + Ok(true) } - fn set_tx_gas_limit(&self, params: Params) -> Result { + fn set_tx_gas_limit(&self, limit: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(limit,)| { - take_weak!(self.miner).set_tx_gas_limit(limit.into()); - Ok(to_value(&true)) - }) + + 
take_weak!(self.miner).set_tx_gas_limit(limit.into()); + Ok(true) } - fn add_reserved_peer(&self, params: Params) -> Result { + fn add_reserved_peer(&self, peer: String) -> Result { try!(self.active()); - from_params::<(String,)>(params).and_then(|(peer,)| { - match take_weak!(self.net).add_reserved_peer(peer) { - Ok(()) => Ok(to_value(&true)), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - }) + + match take_weak!(self.net).add_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } } - fn remove_reserved_peer(&self, params: Params) -> Result { + fn remove_reserved_peer(&self, peer: String) -> Result { try!(self.active()); - from_params::<(String,)>(params).and_then(|(peer,)| { - match take_weak!(self.net).remove_reserved_peer(peer) { - Ok(()) => Ok(to_value(&true)), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - }) + + match take_weak!(self.net).remove_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } } - fn drop_non_reserved_peers(&self, params: Params) -> Result { + fn drop_non_reserved_peers(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); + take_weak!(self.net).deny_unreserved_peers(); - Ok(to_value(&true)) + Ok(true) } - fn accept_non_reserved_peers(&self, params: Params) -> Result { + fn accept_non_reserved_peers(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); + take_weak!(self.net).accept_unreserved_peers(); - Ok(to_value(&true)) + Ok(true) } - fn start_network(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn start_network(&self) -> Result { take_weak!(self.net).start_network(); - Ok(Value::Bool(true)) + Ok(true) } - fn stop_network(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn stop_network(&self) -> Result { take_weak!(self.net).stop_network(); - Ok(Value::Bool(true)) + Ok(true) } } diff --git 
a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 9c22a3638..f0e836fb7 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -16,10 +16,9 @@ //! Net rpc implementation. use std::sync::{Arc, Weak}; -use jsonrpc_core::*; +use jsonrpc_core::Error; use ethsync::SyncProvider; use v1::traits::Net; -use v1::helpers::params::expect_no_params; /// Net rpc implementation. pub struct NetClient where S: SyncProvider { @@ -36,20 +35,19 @@ impl NetClient where S: SyncProvider { } impl Net for NetClient where S: SyncProvider + 'static { - fn version(&self, params: Params) -> Result { - try!(expect_no_params(params)); - Ok(Value::String(format!("{}", take_weak!(self.sync).status().network_id).to_owned())) + fn version(&self) -> Result { + Ok(format!("{}", take_weak!(self.sync).status().network_id).to_owned()) } - fn peer_count(&self, params: Params) -> Result { - try!(expect_no_params(params)); - Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned())) + fn peer_count(&self) -> Result { + Ok(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned()) } - fn is_listening(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn is_listening(&self) -> Result { // right now (11 march 2016), we are always listening for incoming connections - Ok(Value::Bool(true)) + // + // (this may not be true now -- 26 september 2016) + Ok(true) } } diff --git a/rpc/src/v1/impls/rpc.rs b/rpc/src/v1/impls/rpc.rs index fafc92fe5..7f92c1ed9 100644 --- a/rpc/src/v1/impls/rpc.rs +++ b/rpc/src/v1/impls/rpc.rs @@ -16,9 +16,8 @@ //! RPC generic methods implementation. use std::collections::BTreeMap; -use jsonrpc_core::*; +use jsonrpc_core::Error; use v1::traits::Rpc; -use v1::helpers::params::expect_no_params; /// RPC generic methods implementation. 
pub struct RpcClient { @@ -40,26 +39,26 @@ impl RpcClient { } impl Rpc for RpcClient { - fn rpc_modules(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn rpc_modules(&self) -> Result, Error> { let modules = self.modules.iter() .fold(BTreeMap::new(), |mut map, (k, v)| { - map.insert(k.to_owned(), Value::String(v.to_owned())); + map.insert(k.to_owned(), v.to_owned()); map }); - Ok(Value::Object(modules)) + + Ok(modules) } - fn modules(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn modules(&self) -> Result, Error> { let modules = self.modules.iter() .filter(|&(k, _v)| { self.valid_apis.contains(k) }) .fold(BTreeMap::new(), |mut map, (k, v)| { - map.insert(k.to_owned(), Value::String(v.to_owned())); + map.insert(k.to_owned(), v.to_owned()); map }); - Ok(Value::Object(modules)) + + Ok(modules) } } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 448fa4734..97e4d3bea 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -24,7 +24,7 @@ use ethcore::spec::{Genesis, Spec}; use ethcore::block::Block; use ethcore::views::BlockView; use ethcore::ethereum; -use ethcore::miner::{MinerOptions, GasPricer, MinerService, ExternalMiner, Miner, PendingSet}; +use ethcore::miner::{MinerOptions, GasPricer, MinerService, ExternalMiner, Miner, PendingSet, GasLimit}; use ethcore::account_provider::AccountProvider; use devtools::RandomTempPath; use util::Hashable; @@ -58,6 +58,7 @@ fn miner_service(spec: &Spec, accounts: Arc) -> Arc { reseal_on_own_tx: true, tx_queue_size: 1024, tx_gas_limit: !U256::zero(), + tx_queue_gas_limit: GasLimit::None, pending_set: PendingSet::SealingOrElseQueue, reseal_min_period: Duration::from_secs(0), work_queue_size: 50, diff --git a/rpc/src/v1/tests/helpers/fetch.rs b/rpc/src/v1/tests/helpers/fetch.rs new file mode 100644 index 000000000..98d888a10 --- /dev/null +++ b/rpc/src/v1/tests/helpers/fetch.rs @@ -0,0 +1,44 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. 
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Test implementation of fetch client. + +use std::io::Write; +use std::{env, fs, thread}; +use std::sync::Arc; +use std::sync::atomic::AtomicBool; +use fetch::{Fetch, FetchError, FetchResult}; + +/// Test implementation of fetcher. Will always return the same file. +#[derive(Default)] +pub struct TestFetch; + +impl Fetch for TestFetch { + fn request_async(&mut self, _url: &str, _abort: Arc, on_done: Box) -> Result<(), FetchError> { + thread::spawn(move || { + let mut path = env::temp_dir(); + path.push(Self::random_filename()); + + let mut file = fs::File::create(&path).unwrap(); + file.write_all(b"Some content").unwrap(); + + on_done(Ok(path)); + }); + Ok(()) + } +} + + diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 0f36b4f54..0787f2102 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -21,6 +21,7 @@ use util::standard::*; use ethcore::error::{Error, CallError}; use ethcore::client::{MiningBlockChainClient, Executed, CallAnalytics}; use ethcore::block::{ClosedBlock, IsBlock}; +use ethcore::header::BlockNumber; use ethcore::transaction::SignedTransaction; use ethcore::receipt::{Receipt, RichReceipt}; use ethcore::miner::{MinerService, MinerStatus, TransactionImportResult}; @@ -162,7 +163,7 @@ impl MinerService for 
TestMinerService { } /// Returns hashes of transactions currently in pending - fn pending_transactions_hashes(&self) -> Vec { + fn pending_transactions_hashes(&self, _best_block: BlockNumber) -> Vec { vec![] } @@ -186,7 +187,7 @@ impl MinerService for TestMinerService { Some(f(&open_block.close())) } - fn transaction(&self, hash: &H256) -> Option { + fn transaction(&self, _best_block: BlockNumber, hash: &H256) -> Option { self.pending_transactions.lock().get(hash).cloned() } @@ -194,13 +195,13 @@ impl MinerService for TestMinerService { self.pending_transactions.lock().values().cloned().collect() } - fn pending_transactions(&self) -> Vec { + fn pending_transactions(&self, _best_block: BlockNumber) -> Vec { self.pending_transactions.lock().values().cloned().collect() } - fn pending_receipt(&self, hash: &H256) -> Option { + fn pending_receipt(&self, _best_block: BlockNumber, hash: &H256) -> Option { // Not much point implementing this since the logic is complex and the only thing it relies on is pending_receipts, which is already tested. 
- self.pending_receipts().get(hash).map(|r| + self.pending_receipts(0).get(hash).map(|r| RichReceipt { transaction_hash: Default::default(), transaction_index: Default::default(), @@ -212,7 +213,7 @@ impl MinerService for TestMinerService { ) } - fn pending_receipts(&self) -> BTreeMap { + fn pending_receipts(&self, _best_block: BlockNumber) -> BTreeMap { self.pending_receipts.lock().clone() } @@ -249,7 +250,7 @@ impl MinerService for TestMinerService { } fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { - self.latest_closed_block.lock().as_ref().map_or(None, |b| b.block().fields().state.code(address).clone()) + self.latest_closed_block.lock().as_ref().map_or(None, |b| b.block().fields().state.code(address).map(|c| (*c).clone())) } } diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs index 1b8f9e256..234bae1be 100644 --- a/rpc/src/v1/tests/helpers/mod.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -18,6 +18,8 @@ mod sync_provider; mod miner_service; +mod fetch; pub use self::sync_provider::{Config, TestSyncProvider}; pub use self::miner_service::TestMinerService; +pub use self::fetch::TestFetch; diff --git a/rpc/src/v1/tests/mocked/ethcore.rs b/rpc/src/v1/tests/mocked/ethcore.rs index 811ccced4..3dc02e929 100644 --- a/rpc/src/v1/tests/mocked/ethcore.rs +++ b/rpc/src/v1/tests/mocked/ethcore.rs @@ -23,7 +23,7 @@ use ethcore::client::{TestBlockChainClient}; use jsonrpc_core::IoHandler; use v1::{Ethcore, EthcoreClient}; use v1::helpers::{SignerService, NetworkSettings}; -use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; +use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService, TestFetch}; use super::manage_network::TestManageNetwork; fn miner_service() -> Arc { @@ -60,12 +60,15 @@ fn network_service() -> Arc { Arc::new(TestManageNetwork) } +type TestEthcoreClient = EthcoreClient; + fn ethcore_client( client: &Arc, miner: &Arc, sync: &Arc, - net: &Arc) -> EthcoreClient { - 
EthcoreClient::new(client, miner, sync, net, logger(), settings(), None) + net: &Arc) + -> TestEthcoreClient { + EthcoreClient::with_fetch(client, miner, sync, net, logger(), settings(), None) } #[test] @@ -140,9 +143,9 @@ fn rpc_ethcore_dev_logs() { let logger = logger(); logger.append("a".to_owned()); logger.append("b".to_owned()); - let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger.clone(), settings(), None).to_delegate(); + let ethcore: TestEthcoreClient = EthcoreClient::with_fetch(&client, &miner, &sync, &net, logger.clone(), settings(), None); let io = IoHandler::new(); - io.add_delegate(ethcore); + io.add_delegate(ethcore.to_delegate()); let request = r#"{"jsonrpc": "2.0", "method": "ethcore_devLogs", "params":[], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":["b","a"],"id":1}"#; @@ -263,8 +266,8 @@ fn rpc_ethcore_unsigned_transactions_count() { let net = network_service(); let io = IoHandler::new(); let signer = Arc::new(SignerService::new_test()); - let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(signer)).to_delegate(); - io.add_delegate(ethcore); + let ethcore: TestEthcoreClient = EthcoreClient::with_fetch(&client, &miner, &sync, &net, logger(), settings(), Some(signer)); + io.add_delegate(ethcore.to_delegate()); let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; @@ -287,6 +290,21 @@ fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() { assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } +#[test] +fn rpc_ethcore_hash_content() { + let miner = miner_service(); + let client = client_service(); + let sync = sync_provider(); + let net = network_service(); + let io = IoHandler::new(); + io.add_delegate(ethcore_client(&client, &miner, &sync, &net).to_delegate()); + + let request = r#"{"jsonrpc": "2.0", "method": "ethcore_hashContent", 
"params":["https://ethcore.io/assets/images/ethcore-black-horizontal.png"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x2be00befcf008bc0e7d9cdefc194db9c75352e8632f48498b5a6bfce9f02c88e","id":1}"#; + + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); +} + #[test] fn rpc_ethcore_pending_transactions() { let miner = miner_service(); diff --git a/rpc/src/v1/tests/mocked/ethcore_set.rs b/rpc/src/v1/tests/mocked/ethcore_set.rs index eaa4cb0cb..e87d49b8c 100644 --- a/rpc/src/v1/tests/mocked/ethcore_set.rs +++ b/rpc/src/v1/tests/mocked/ethcore_set.rs @@ -115,4 +115,4 @@ fn rpc_ethcore_set_transactions_limit() { assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); assert_eq!(miner.transactions_limit(), 10_240_240); -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 80789fd0e..62301e21f 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -28,174 +28,173 @@ build_rpc_trait! { /// Eth rpc interface. pub trait Eth { /// Returns protocol version encoded as a string (quotes are necessary). - #[name("eth_protocolVersion")] + #[rpc(name = "eth_protocolVersion")] fn protocol_version(&self) -> Result; /// Returns an object with data about the sync status or false. (wtf?) - #[name("eth_syncing")] + #[rpc(name = "eth_syncing")] fn syncing(&self) -> Result; /// Returns the number of hashes per second that the node is mining with. - #[name("eth_hashrate")] + #[rpc(name = "eth_hashrate")] fn hashrate(&self) -> Result; /// Returns block author. - #[name("eth_coinbase")] + #[rpc(name = "eth_coinbase")] fn author(&self) -> Result; /// Returns true if client is actively mining new blocks. - #[name("eth_mining")] + #[rpc(name = "eth_mining")] fn is_mining(&self) -> Result; /// Returns current gas_price. - #[name("eth_gasPrice")] + #[rpc(name = "eth_gasPrice")] fn gas_price(&self) -> Result; /// Returns accounts list. 
- #[name("eth_accounts")] + #[rpc(name = "eth_accounts")] fn accounts(&self) -> Result, Error>; /// Returns highest block number. - #[name("eth_blockNumber")] + #[rpc(name = "eth_blockNumber")] fn block_number(&self) -> Result; /// Returns balance of the given account. - #[name("eth_getBalance")] + #[rpc(name = "eth_getBalance")] fn balance(&self, H160, Trailing) -> Result; /// Returns content of the storage at given address. - #[name("eth_getStorageAt")] + #[rpc(name = "eth_getStorageAt")] fn storage_at(&self, H160, U256, Trailing) -> Result; /// Returns block with given hash. - #[name("eth_getBlockByHash")] + #[rpc(name = "eth_getBlockByHash")] fn block_by_hash(&self, H256, bool) -> Result, Error>; /// Returns block with given number. - #[name("eth_getBlockByNumber")] + #[rpc(name = "eth_getBlockByNumber")] fn block_by_number(&self, BlockNumber, bool) -> Result, Error>; /// Returns the number of transactions sent from given address at given time (block number). - #[name("eth_getTransactionCount")] + #[rpc(name = "eth_getTransactionCount")] fn transaction_count(&self, H160, Trailing) -> Result; /// Returns the number of transactions in a block with given hash. - #[name("eth_getBlockTransactionCountByHash")] + #[rpc(name = "eth_getBlockTransactionCountByHash")] fn block_transaction_count_by_hash(&self, H256) -> Result, Error>; /// Returns the number of transactions in a block with given block number. - #[name("eth_getBlockTransactionCountByNumber")] + #[rpc(name = "eth_getBlockTransactionCountByNumber")] fn block_transaction_count_by_number(&self, BlockNumber) -> Result, Error>; /// Returns the number of uncles in a block with given hash. - #[name("eth_getUncleCountByBlockHash")] + #[rpc(name = "eth_getUncleCountByBlockHash")] fn block_uncles_count_by_hash(&self, H256) -> Result, Error>; /// Returns the number of uncles in a block with given block number. 
- #[name("eth_getUncleCountByBlockNumber")] + #[rpc(name = "eth_getUncleCountByBlockNumber")] fn block_uncles_count_by_number(&self, BlockNumber) -> Result, Error>; /// Returns the code at given address at given time (block number). - #[name("eth_getCode")] + #[rpc(name = "eth_getCode")] fn code_at(&self, H160, Trailing) -> Result; /// Sends signed transaction, returning its hash. - #[name("eth_sendRawTransaction")] + #[rpc(name = "eth_sendRawTransaction")] fn send_raw_transaction(&self, Bytes) -> Result; /// Call contract, returning the output data. - #[name("eth_call")] + #[rpc(name = "eth_call")] fn call(&self, CallRequest, Trailing) -> Result; /// Estimate gas needed for execution of given contract. - #[name("eth_estimateGas")] + #[rpc(name = "eth_estimateGas")] fn estimate_gas(&self, CallRequest, Trailing) -> Result; /// Get transaction by its hash. - #[name("eth_getTransactionByHash")] + #[rpc(name = "eth_getTransactionByHash")] fn transaction_by_hash(&self, H256) -> Result, Error>; /// Returns transaction at given block hash and index. - #[name("eth_getTransactionByBlockHashAndIndex")] + #[rpc(name = "eth_getTransactionByBlockHashAndIndex")] fn transaction_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; /// Returns transaction by given block number and index. - #[name("eth_getTransactionByBlockNumberAndIndex")] + #[rpc(name = "eth_getTransactionByBlockNumberAndIndex")] fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; /// Returns transaction receipt. - #[name("eth_getTransactionReceipt")] + #[rpc(name = "eth_getTransactionReceipt")] fn transaction_receipt(&self, H256) -> Result, Error>; /// Returns an uncles at given block and index. - #[name("eth_getUncleByBlockHashAndIndex")] + #[rpc(name = "eth_getUncleByBlockHashAndIndex")] fn uncle_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; /// Returns an uncles at given block and index. 
- #[name("eth_getUncleByBlockNumberAndIndex")] + #[rpc(name = "eth_getUncleByBlockNumberAndIndex")] fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; /// Returns available compilers. - #[name("eth_getCompilers")] + #[rpc(name = "eth_getCompilers")] fn compilers(&self) -> Result, Error>; /// Compiles lll code. - #[name("eth_compileLLL")] + #[rpc(name = "eth_compileLLL")] fn compile_lll(&self, String) -> Result; /// Compiles solidity. - #[name("eth_compileSolidity")] + #[rpc(name = "eth_compileSolidity")] fn compile_solidity(&self, String) -> Result; /// Compiles serpent. - #[name("eth_compileSerpent")] + #[rpc(name = "eth_compileSerpent")] fn compile_serpent(&self, String) -> Result; /// Returns logs matching given filter object. - #[name("eth_getLogs")] + #[rpc(name = "eth_getLogs")] fn logs(&self, Filter) -> Result, Error>; /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. - #[name("eth_getWork")] + #[rpc(name = "eth_getWork")] fn work(&self, Trailing) -> Result; /// Used for submitting a proof-of-work solution. - #[name("eth_submitWork")] + #[rpc(name = "eth_submitWork")] fn submit_work(&self, H64, H256, H256) -> Result; /// Used for submitting mining hashrate. - #[name("eth_submitHashrate")] + #[rpc(name = "eth_submitHashrate")] fn submit_hashrate(&self, U256, H256) -> Result; } } build_rpc_trait! { - /// Eth filters rpc api (polling). // TODO: do filters api properly pub trait EthFilter { /// Returns id of new filter. - #[name("eth_newFilter")] + #[rpc(name = "eth_newFilter")] fn new_filter(&self, Filter) -> Result; /// Returns id of new block filter. - #[name("eth_newBlockFilter")] + #[rpc(name = "eth_newBlockFilter")] fn new_block_filter(&self) -> Result; /// Returns id of new block filter. - #[name("eth_newPendingTransactionFilter")] + #[rpc(name = "eth_newPendingTransactionFilter")] fn new_pending_transaction_filter(&self) -> Result; /// Returns filter changes since last poll. 
- #[name("eth_getFilterChanges")] + #[rpc(name = "eth_getFilterChanges")] fn filter_changes(&self, Index) -> Result; /// Returns all logs matching given filter (in a range 'from' - 'to'). - #[name("eth_getFilterLogs")] + #[rpc(name = "eth_getFilterLogs")] fn filter_logs(&self, Index) -> Result, Error>; /// Uninstalls filter. - #[name("eth_uninstallFilter")] + #[rpc(name = "eth_uninstallFilter")] fn uninstall_filter(&self, Index) -> Result; } } diff --git a/rpc/src/v1/traits/ethcore.rs b/rpc/src/v1/traits/ethcore.rs index 56c27534a..25bb210fd 100644 --- a/rpc/src/v1/traits/ethcore.rs +++ b/rpc/src/v1/traits/ethcore.rs @@ -15,98 +15,107 @@ // along with Parity. If not, see . //! Ethcore-specific rpc interface. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// Ethcore-specific rpc interface. -pub trait Ethcore: Sized + Send + Sync + 'static { +use v1::helpers::auto_args::{Wrap, WrapAsync, Ready}; +use v1::types::{H160, H256, H512, U256, Bytes, Peers, Transaction, RpcSettings}; - /// Returns current transactions limit. - fn transactions_limit(&self, _: Params) -> Result; +build_rpc_trait! { + /// Ethcore-specific rpc interface. + pub trait Ethcore { + /// Returns current transactions limit. + #[rpc(name = "ethcore_transactionsLimit")] + fn transactions_limit(&self) -> Result; - /// Returns mining extra data. - fn extra_data(&self, _: Params) -> Result; + /// Returns mining extra data. + #[rpc(name = "ethcore_extraData")] + fn extra_data(&self) -> Result; - /// Returns mining gas floor target. - fn gas_floor_target(&self, _: Params) -> Result; + /// Returns mining gas floor target. + #[rpc(name = "ethcore_gasFloorTarget")] + fn gas_floor_target(&self) -> Result; - /// Returns mining gas floor cap. - fn gas_ceil_target(&self, _: Params) -> Result; + /// Returns mining gas floor cap. + #[rpc(name = "ethcore_gasCeilTarget")] + fn gas_ceil_target(&self) -> Result; - /// Returns minimal gas price for transaction to be included in queue. 
- fn min_gas_price(&self, _: Params) -> Result; + /// Returns minimal gas price for transaction to be included in queue. + #[rpc(name = "ethcore_minGasPrice")] + fn min_gas_price(&self) -> Result; - /// Returns latest logs - fn dev_logs(&self, _: Params) -> Result; + /// Returns latest logs + #[rpc(name = "ethcore_devLogs")] + fn dev_logs(&self) -> Result, Error>; - /// Returns logs levels - fn dev_logs_levels(&self, _: Params) -> Result; + /// Returns logs levels + #[rpc(name = "ethcore_devLogsLevels")] + fn dev_logs_levels(&self) -> Result; - /// Returns chain name - fn net_chain(&self, _: Params) -> Result; + /// Returns chain name + #[rpc(name = "ethcore_netChain")] + fn net_chain(&self) -> Result; - /// Returns peers details - fn net_peers(&self, _: Params) -> Result; + /// Returns peers details + #[rpc(name = "ethcore_netPeers")] + fn net_peers(&self) -> Result; - /// Returns network port - fn net_port(&self, _: Params) -> Result; + /// Returns network port + #[rpc(name = "ethcore_netPort")] + fn net_port(&self) -> Result; - /// Returns rpc settings - fn rpc_settings(&self, _: Params) -> Result; + /// Returns rpc settings + #[rpc(name = "ethcore_rpcSettings")] + fn rpc_settings(&self) -> Result; - /// Returns node name - fn node_name(&self, _: Params) -> Result; + /// Returns node name + #[rpc(name = "ethcore_nodeName")] + fn node_name(&self) -> Result; - /// Returns default extra data - fn default_extra_data(&self, _: Params) -> Result; + /// Returns default extra data + #[rpc(name = "ethcore_defaultExtraData")] + fn default_extra_data(&self) -> Result; - /// Returns distribution of gas price in latest blocks. - fn gas_price_statistics(&self, _: Params) -> Result; + /// Returns distribution of gas price in latest blocks. 
+ #[rpc(name = "ethcore_gasPriceStatistics")] + fn gas_price_statistics(&self) -> Result, Error>; - /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) - /// Returns error when signer is disabled - fn unsigned_transactions_count(&self, _: Params) -> Result; + /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) + /// Returns error when signer is disabled + #[rpc(name = "ethcore_unsignedTransactionsCount")] + fn unsigned_transactions_count(&self) -> Result; - /// Returns a cryptographically random phrase sufficient for securely seeding a secret key. - fn generate_secret_phrase(&self, _: Params) -> Result; + /// Returns a cryptographically random phrase sufficient for securely seeding a secret key. + #[rpc(name = "ethcore_generateSecretPhrase")] + fn generate_secret_phrase(&self) -> Result; - /// Returns whatever address would be derived from the given phrase if it were to seed a brainwallet. - fn phrase_to_address(&self, _: Params) -> Result; + /// Returns whatever address would be derived from the given phrase if it were to seed a brainwallet. + #[rpc(name = "ethcore_phraseToAddress")] + fn phrase_to_address(&self, String) -> Result; - /// Returns the value of the registrar for this network. - fn registry_address(&self, _: Params) -> Result; + /// Returns the value of the registrar for this network. + #[rpc(name = "ethcore_registryAddress")] + fn registry_address(&self) -> Result, Error>; - /// Encrypt some data with a public key under ECIES. - /// First parameter is the 512-byte destination public key, second is the message. - fn encrypt_message(&self, _: Params) -> Result; + /// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not. + #[rpc(name = "ethcore_listAccounts")] + fn list_accounts(&self) -> Result>, Error>; - /// Returns all pending (current) transactions from transaction queue. 
- fn pending_transactions(&self, _: Params) -> Result; + /// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`), + /// or null if not. + #[rpc(name = "ethcore_listStorageKeys")] + fn list_storage_keys(&self, H160) -> Result>, Error>; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); + /// Encrypt some data with a public key under ECIES. + /// First parameter is the 512-byte destination public key, second is the message. + #[rpc(name = "ethcore_encryptMessage")] + fn encrypt_message(&self, H512, Bytes) -> Result; - delegate.add_method("ethcore_extraData", Ethcore::extra_data); - delegate.add_method("ethcore_gasFloorTarget", Ethcore::gas_floor_target); - delegate.add_method("ethcore_gasCeilTarget", Ethcore::gas_ceil_target); - delegate.add_method("ethcore_minGasPrice", Ethcore::min_gas_price); - delegate.add_method("ethcore_transactionsLimit", Ethcore::transactions_limit); - delegate.add_method("ethcore_devLogs", Ethcore::dev_logs); - delegate.add_method("ethcore_devLogsLevels", Ethcore::dev_logs_levels); - delegate.add_method("ethcore_netChain", Ethcore::net_chain); - delegate.add_method("ethcore_netPeers", Ethcore::net_peers); - delegate.add_method("ethcore_netPort", Ethcore::net_port); - delegate.add_method("ethcore_rpcSettings", Ethcore::rpc_settings); - delegate.add_method("ethcore_nodeName", Ethcore::node_name); - delegate.add_method("ethcore_defaultExtraData", Ethcore::default_extra_data); - delegate.add_method("ethcore_gasPriceStatistics", Ethcore::gas_price_statistics); - delegate.add_method("ethcore_unsignedTransactionsCount", Ethcore::unsigned_transactions_count); - delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase); - delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address); - delegate.add_method("ethcore_registryAddress", Ethcore::registry_address); - 
delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message); - delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions); - delegate + /// Returns all pending transactions from transaction queue. + #[rpc(name = "ethcore_pendingTransactions")] + fn pending_transactions(&self) -> Result, Error>; + + /// Hash a file content under given URL. + #[rpc(async, name = "ethcore_hashContent")] + fn hash_content(&self, Ready, String); } -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/ethcore_set.rs b/rpc/src/v1/traits/ethcore_set.rs index bd1f6bf7c..9946314d6 100644 --- a/rpc/src/v1/traits/ethcore_set.rs +++ b/rpc/src/v1/traits/ethcore_set.rs @@ -16,66 +16,64 @@ //! Ethcore-specific rpc interface for operations altering the settings. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// Ethcore-specific rpc interface for operations altering the settings. -pub trait EthcoreSet: Sized + Send + Sync + 'static { +use v1::helpers::auto_args::Wrap; +use v1::types::{Bytes, H160, U256}; - /// Sets new minimal gas price for mined blocks. - fn set_min_gas_price(&self, _: Params) -> Result; +build_rpc_trait! { + /// Ethcore-specific rpc interface for operations altering the settings. + pub trait EthcoreSet { + /// Sets new minimal gas price for mined blocks. + #[rpc(name = "ethcore_setMinGasPrice")] + fn set_min_gas_price(&self, U256) -> Result; - /// Sets new gas floor target for mined blocks. - fn set_gas_floor_target(&self, _: Params) -> Result; + /// Sets new gas floor target for mined blocks. + #[rpc(name = "ethcore_setGasFloorTarget")] + fn set_gas_floor_target(&self, U256) -> Result; - /// Sets new gas ceiling target for mined blocks. - fn set_gas_ceil_target(&self, _: Params) -> Result; + /// Sets new gas ceiling target for mined blocks. + #[rpc(name = "ethcore_setGasCeilTarget")] + fn set_gas_ceil_target(&self, U256) -> Result; - /// Sets new extra data for mined blocks. 
- fn set_extra_data(&self, _: Params) -> Result; + /// Sets new extra data for mined blocks. + #[rpc(name = "ethcore_setExtraData")] + fn set_extra_data(&self, Bytes) -> Result; - /// Sets new author for mined block. - fn set_author(&self, _: Params) -> Result; + /// Sets new author for mined block. + #[rpc(name = "ethcore_setAuthor")] + fn set_author(&self, H160) -> Result; - /// Sets the limits for transaction queue. - fn set_transactions_limit(&self, _: Params) -> Result; + /// Sets the limits for transaction queue. + #[rpc(name = "ethcore_setTransactionsLimit")] + fn set_transactions_limit(&self, usize) -> Result; - /// Sets the maximum amount of gas a single transaction may consume. - fn set_tx_gas_limit(&self, _: Params) -> Result; + /// Sets the maximum amount of gas a single transaction may consume. + #[rpc(name = "ethcore_setMaxTransactionGas")] + fn set_tx_gas_limit(&self, U256) -> Result; - /// Add a reserved peer. - fn add_reserved_peer(&self, _: Params) -> Result; + /// Add a reserved peer. + #[rpc(name = "ethcore_addReservedPeer")] + fn add_reserved_peer(&self, String) -> Result; - /// Remove a reserved peer. - fn remove_reserved_peer(&self, _: Params) -> Result; + /// Remove a reserved peer. + #[rpc(name = "ethcore_removeReservedPeer")] + fn remove_reserved_peer(&self, String) -> Result; - /// Drop all non-reserved peers. - fn drop_non_reserved_peers(&self, _: Params) -> Result; + /// Drop all non-reserved peers. + #[rpc(name = "ethcore_dropNonReservedPeers")] + fn drop_non_reserved_peers(&self) -> Result; - /// Accept non-reserved peers (default behavior) - fn accept_non_reserved_peers(&self, _: Params) -> Result; + /// Accept non-reserved peers (default behavior) + #[rpc(name = "ethcore_acceptNonReservedPeers")] + fn accept_non_reserved_peers(&self) -> Result; - /// Start the network. - fn start_network(&self, _: Params) -> Result; + /// Start the network. 
+ #[rpc(name = "ethcore_startNetwork")] + fn start_network(&self) -> Result; - /// Stop the network. - fn stop_network(&self, _: Params) -> Result; - - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("ethcore_setMinGasPrice", EthcoreSet::set_min_gas_price); - delegate.add_method("ethcore_setGasFloorTarget", EthcoreSet::set_gas_floor_target); - delegate.add_method("ethcore_setGasCeilTarget", EthcoreSet::set_gas_ceil_target); - delegate.add_method("ethcore_setExtraData", EthcoreSet::set_extra_data); - delegate.add_method("ethcore_setAuthor", EthcoreSet::set_author); - delegate.add_method("ethcore_setMaxTransactionGas", EthcoreSet::set_tx_gas_limit); - delegate.add_method("ethcore_setTransactionsLimit", EthcoreSet::set_transactions_limit); - delegate.add_method("ethcore_addReservedPeer", EthcoreSet::add_reserved_peer); - delegate.add_method("ethcore_removeReservedPeer", EthcoreSet::remove_reserved_peer); - delegate.add_method("ethcore_dropNonReservedPeers", EthcoreSet::drop_non_reserved_peers); - delegate.add_method("ethcore_acceptNonReservedPeers", EthcoreSet::accept_non_reserved_peers); - - delegate + /// Stop the network. + #[rpc(name = "ethcore_stopNetwork")] + fn stop_network(&self) -> Result; } -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/net.rs b/rpc/src/v1/traits/net.rs index 56fba3e32..36bd8be70 100644 --- a/rpc/src/v1/traits/net.rs +++ b/rpc/src/v1/traits/net.rs @@ -15,27 +15,24 @@ // along with Parity. If not, see . //! Net rpc interface. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// Net rpc interface. -pub trait Net: Sized + Send + Sync + 'static { - /// Returns protocol version. - fn version(&self, _: Params) -> Result; +use v1::helpers::auto_args::Wrap; - /// Returns number of peers connected to node. - fn peer_count(&self, _: Params) -> Result; +build_rpc_trait! { + /// Net rpc interface. 
+ pub trait Net { + /// Returns protocol version. + #[rpc(name = "net_version")] + fn version(&self) -> Result; - /// Returns true if client is actively listening for network connections. - /// Otherwise false. - fn is_listening(&self, _: Params) -> Result; + /// Returns number of peers connected to node. + #[rpc(name = "net_peerCount")] + fn peer_count(&self) -> Result; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("net_version", Net::version); - delegate.add_method("net_peerCount", Net::peer_count); - delegate.add_method("net_listening", Net::is_listening); - delegate + /// Returns true if client is actively listening for network connections. + /// Otherwise false. + #[rpc(name = "net_listening")] + fn is_listening(&self) -> Result; } -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/rpc.rs b/rpc/src/v1/traits/rpc.rs index 669d0d8c6..2109442a7 100644 --- a/rpc/src/v1/traits/rpc.rs +++ b/rpc/src/v1/traits/rpc.rs @@ -16,26 +16,21 @@ //! RPC interface. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// RPC Interface. -pub trait Rpc: Sized + Send + Sync + 'static { +use v1::helpers::auto_args::Wrap; - /// Returns supported modules for Geth 1.3.6 - fn modules(&self, _: Params) -> Result; +use std::collections::BTreeMap; - /// Returns supported modules for Geth 1.4.0 - fn rpc_modules(&self, _: Params) -> Result; +build_rpc_trait! { + /// RPC Interface. + pub trait Rpc { + /// Returns supported modules for Geth 1.3.6 + #[rpc(name = "modules")] + fn modules(&self) -> Result, Error>; - /// Should be used to convert object to io delegate. 
- fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - // Geth 1.3.6 compatibility - delegate.add_method("modules", Rpc::modules); - // Geth 1.4.0 compatibility - delegate.add_method("rpc_modules", Rpc::rpc_modules); - delegate + /// Returns supported modules for Geth 1.4.0 + #[rpc(name = "rpc_modules")] + fn rpc_modules(&self) -> Result, Error>; } -} - +} \ No newline at end of file diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index b4a45272b..fc163c54b 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -85,8 +85,14 @@ impl Into for Filter { VariadicValue::Null => None, VariadicValue::Single(t) => Some(vec![t.into()]), VariadicValue::Multiple(t) => Some(t.into_iter().map(Into::into).collect()) - }).filter_map(|m| m).collect()).into_iter(); - vec![iter.next(), iter.next(), iter.next(), iter.next()] + }).collect()).into_iter(); + + vec![ + iter.next().unwrap_or(None), + iter.next().unwrap_or(None), + iter.next().unwrap_or(None), + iter.next().unwrap_or(None) + ] }, limit: self.limit, } @@ -121,6 +127,8 @@ mod tests { use util::hash::*; use super::*; use v1::types::BlockNumber; + use ethcore::filter::Filter as EthFilter; + use ethcore::client::BlockID; #[test] fn topic_deserialization() { @@ -148,4 +156,33 @@ mod tests { limit: None, }); } + + #[test] + fn filter_conversion() { + let filter = Filter { + from_block: Some(BlockNumber::Earliest), + to_block: Some(BlockNumber::Latest), + address: Some(VariadicValue::Multiple(vec![])), + topics: Some(vec![ + VariadicValue::Null, + VariadicValue::Single("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into()), + VariadicValue::Null, + ]), + limit: None, + }; + + let eth_filter: EthFilter = filter.into(); + assert_eq!(eth_filter, EthFilter { + from_block: BlockID::Earliest, + to_block: BlockID::Latest, + address: Some(vec![]), + topics: vec![ + None, + 
Some(vec!["000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".into()]), + None, + None, + ], + limit: None, + }); + } } diff --git a/rpc/src/v1/types/hash.rs b/rpc/src/v1/types/hash.rs index 3080aa031..3db0cf124 100644 --- a/rpc/src/v1/types/hash.rs +++ b/rpc/src/v1/types/hash.rs @@ -25,9 +25,10 @@ use util::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as macro_rules! impl_hash { ($name: ident, $other: ident, $size: expr) => { /// Hash serialization - #[derive(Eq)] pub struct $name([u8; $size]); + impl Eq for $name { } + impl Default for $name { fn default() -> Self { $name([0; $size]) diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 1369037ed..8aaf90eab 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -27,6 +27,7 @@ mod sync; mod transaction; mod transaction_request; mod receipt; +mod rpc_settings; mod trace; mod trace_filter; mod uint; @@ -45,6 +46,7 @@ pub use self::sync::{SyncStatus, SyncInfo, Peers}; pub use self::transaction::Transaction; pub use self::transaction_request::TransactionRequest; pub use self::receipt::Receipt; +pub use self::rpc_settings::RpcSettings; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; pub use self::uint::U256; diff --git a/rpc/src/v1/types/rpc_settings.rs b/rpc/src/v1/types/rpc_settings.rs new file mode 100644 index 000000000..9a20afa7a --- /dev/null +++ b/rpc/src/v1/types/rpc_settings.rs @@ -0,0 +1,28 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! RPC Settings data. + +/// Values of RPC settings. +#[derive(Serialize, Deserialize)] +pub struct RpcSettings { + /// Whether RPC is enabled. + pub enabled: bool, + /// The interface being listened on. + pub interface: String, + /// The port being listened on. + pub port: u64, +} \ No newline at end of file diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs index f66d8e0c1..ace76827c 100644 --- a/rpc/src/v1/types/trace.rs +++ b/rpc/src/v1/types/trace.rs @@ -414,15 +414,15 @@ pub struct LocalizedTrace { /// Result result: Res, /// Trace address - trace_address: Vec, + trace_address: Vec, /// Subtraces - subtraces: U256, + subtraces: usize, /// Transaction position - transaction_position: U256, + transaction_position: usize, /// Transaction hash transaction_hash: H256, /// Block Number - block_number: U256, + block_number: u64, /// Block Hash block_hash: H256, } @@ -485,9 +485,9 @@ impl From for LocalizedTrace { #[derive(Debug)] pub struct Trace { /// Trace address - trace_address: Vec, + trace_address: Vec, /// Subtraces - subtraces: U256, + subtraces: usize, /// Action action: Action, /// Result @@ -601,15 +601,15 @@ mod tests { gas_used: 8.into(), output: vec![0x56, 0x78].into(), }), - trace_address: vec![10.into()], - subtraces: 1.into(), - transaction_position: 11.into(), + trace_address: vec![10], + subtraces: 1, + transaction_position: 11, transaction_hash: 12.into(), - block_number: 13.into(), + block_number: 13, block_hash: 14.into(), }; let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, 
r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"result":{"gasUsed":"0x8","output":"0x5678"},"traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); + assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"result":{"gasUsed":"0x8","output":"0x5678"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); } #[test] @@ -624,15 +624,15 @@ mod tests { call_type: CallType::Call, }), result: Res::FailedCall(TraceError::OutOfGas), - trace_address: vec![10.into()], - subtraces: 1.into(), - transaction_position: 11.into(), + trace_address: vec![10], + subtraces: 1, + transaction_position: 11, transaction_hash: 12.into(), - block_number: 13.into(), + block_number: 13, block_hash: 14.into(), }; let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"error":"Out of gas","traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); + assert_eq!(serialized, 
r#"{"type":"call","action":{"from":"0x0000000000000000000000000000000000000004","to":"0x0000000000000000000000000000000000000005","value":"0x6","gas":"0x7","input":"0x1234","callType":"call"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); } #[test] @@ -649,15 +649,15 @@ mod tests { code: vec![0x56, 0x78].into(), address: 0xff.into(), }), - trace_address: vec![10.into()], - subtraces: 1.into(), - transaction_position: 11.into(), + trace_address: vec![10], + subtraces: 1, + transaction_position: 11, transaction_hash: 12.into(), - block_number: 13.into(), + block_number: 13, block_hash: 14.into(), }; let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"result":{"gasUsed":"0x8","code":"0x5678","address":"0x00000000000000000000000000000000000000ff"},"traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); + assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"result":{"gasUsed":"0x8","code":"0x5678","address":"0x00000000000000000000000000000000000000ff"},"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); } #[test] @@ -670,15 +670,15 @@ mod tests { init: Bytes::new(vec![0x12, 0x34]), }), result: 
Res::FailedCreate(TraceError::OutOfGas), - trace_address: vec![10.into()], - subtraces: 1.into(), - transaction_position: 11.into(), + trace_address: vec![10], + subtraces: 1, + transaction_position: 11, transaction_hash: 12.into(), - block_number: 13.into(), + block_number: 13, block_hash: 14.into(), }; let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"error":"Out of gas","traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); + assert_eq!(serialized, r#"{"type":"create","action":{"from":"0x0000000000000000000000000000000000000004","value":"0x6","gas":"0x7","init":"0x1234"},"error":"Out of gas","traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); } #[test] @@ -690,15 +690,15 @@ mod tests { balance: 7.into(), }), result: Res::None, - trace_address: vec![10.into()], - subtraces: 1.into(), - transaction_position: 11.into(), + trace_address: vec![10], + subtraces: 1, + transaction_position: 11, transaction_hash: 12.into(), - block_number: 13.into(), + block_number: 13, block_hash: 14.into(), }; let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, 
r#"{"type":"suicide","action":{"address":"0x0000000000000000000000000000000000000004","refundAddress":"0x0000000000000000000000000000000000000006","balance":"0x7"},"result":null,"traceAddress":["0xa"],"subtraces":"0x1","transactionPosition":"0xb","transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":"0xd","blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); + assert_eq!(serialized, r#"{"type":"suicide","action":{"address":"0x0000000000000000000000000000000000000004","refundAddress":"0x0000000000000000000000000000000000000006","balance":"0x7"},"result":null,"traceAddress":[10],"subtraces":1,"transactionPosition":11,"transactionHash":"0x000000000000000000000000000000000000000000000000000000000000000c","blockNumber":13,"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000e"}"#); } #[test] diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index 9be7b1170..ce0fa49a2 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -23,9 +23,11 @@ use util::{U256 as EthU256, Uint}; macro_rules! impl_uint { ($name: ident, $other: ident, $size: expr) => { /// Uint serialization. 
- #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] + #[derive(Debug, Default, Clone, Copy, PartialEq, Hash)] pub struct $name($other); + impl Eq for $name { } + impl From for $name where $other: From { fn from(o: T) -> Self { $name($other::from(o)) diff --git a/stratum/src/traits.rs b/stratum/src/traits.rs index 339f753b5..5e93a9484 100644 --- a/stratum/src/traits.rs +++ b/stratum/src/traits.rs @@ -32,7 +32,6 @@ impl From for Error { } } -#[derive(Ipc)] #[ipc(client_ident="RemoteJobDispatcher")] /// Interface that can provide pow/blockchain-specific responses for the clients pub trait JobDispatcher: Send + Sync { @@ -44,7 +43,6 @@ pub trait JobDispatcher: Send + Sync { fn job(&self, _worker_id: String) -> Option { None } } -#[derive(Ipc)] #[ipc(client_ident="RemoteWorkHandler")] /// Interface that can handle requests to push job for workers pub trait PushWorkHandler: Send + Sync { diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 99c522075..3b5fc3c9c 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -29,5 +29,6 @@ ethcore-ipc-nano = { path = "../ipc/nano" } parking_lot = "0.2.6" [features] -default = [] +default = ["ipc"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev"] +ipc = [] diff --git a/sync/build.rs b/sync/build.rs index cdb717e0e..c465d5e34 100644 --- a/sync/build.rs +++ b/sync/build.rs @@ -17,5 +17,5 @@ extern crate ethcore_ipc_codegen; fn main() { - ethcore_ipc_codegen::derive_ipc("src/api.rs").unwrap(); + ethcore_ipc_codegen::derive_ipc_cond("src/api.rs", cfg!(feature="ipc")).unwrap(); } diff --git a/sync/src/api.rs b/sync/src/api.rs index de1769d9c..c09157e3b 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
use std::sync::Arc; +use std::str; use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError}; use util::{U256, H256}; @@ -29,9 +30,6 @@ use ipc::{BinaryConvertable, BinaryConvertError, IpcConfig}; use std::str::FromStr; use parking_lot::RwLock; -/// Ethereum sync protocol -pub const ETH_PROTOCOL: &'static str = "eth"; - /// Sync configuration #[derive(Debug, Clone, Copy)] pub struct SyncConfig { @@ -39,6 +37,8 @@ pub struct SyncConfig { pub max_download_ahead_blocks: usize, /// Network ID pub network_id: U256, + /// Main "eth" subprotocol name. + pub subprotocol_name: [u8; 3], /// Fork block to check pub fork_block: Option<(BlockNumber, H256)>, } @@ -48,6 +48,7 @@ impl Default for SyncConfig { SyncConfig { max_download_ahead_blocks: 20000, network_id: U256::from(1), + subprotocol_name: *b"eth", fork_block: None, } } @@ -68,6 +69,8 @@ pub struct EthSync { network: NetworkService, /// Protocol handler handler: Arc, + /// The main subprotocol name + subprotocol_name: [u8; 3], } impl EthSync { @@ -78,13 +81,13 @@ impl EthSync { let sync = Arc::new(EthSync{ network: service, handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain, snapshot_service: snapshot_service }), + subprotocol_name: config.subprotocol_name, }); Ok(sync) } } -#[derive(Ipc)] #[ipc(client_ident="SyncClient")] impl SyncProvider for EthSync { /// Get sync status @@ -135,7 +138,7 @@ impl ChainNotify for EthSync { sealed: Vec, _duration: u64) { - self.network.with_context(ETH_PROTOCOL, |context| { + self.network.with_context(self.subprotocol_name, |context| { let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &*self.handler.snapshot_service); self.handler.sync.write().chain_new_blocks( &mut sync_io, @@ -149,7 +152,7 @@ impl ChainNotify for EthSync { fn start(&self) { self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e)); - 
self.network.register_protocol(self.handler.clone(), ETH_PROTOCOL, &[62u8, 63u8, 64u8]) + self.network.register_protocol(self.handler.clone(), self.subprotocol_name, &[62u8, 63u8, 64u8]) .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); } @@ -180,7 +183,6 @@ pub trait ManageNetwork : Send + Sync { } -#[derive(Ipc)] #[ipc(client_ident="NetworkManagerClient")] impl ManageNetwork for EthSync { fn accept_unreserved_peers(&self) { @@ -204,7 +206,7 @@ impl ManageNetwork for EthSync { } fn stop_network(&self) { - self.network.with_context(ETH_PROTOCOL, |context| { + self.network.with_context(self.subprotocol_name, |context| { let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &*self.handler.snapshot_service); self.handler.sync.write().abort(&mut sync_io); }); diff --git a/sync/src/blocks.rs b/sync/src/blocks.rs index ad842ced6..ae2092f25 100644 --- a/sync/src/blocks.rs +++ b/sync/src/blocks.rs @@ -19,7 +19,7 @@ use rlp::*; use network::NetworkError; use ethcore::header::{ Header as BlockHeader}; -known_heap_size!(0, HeaderId, SyncBlock); +known_heap_size!(0, HeaderId); /// Block data with optional body. struct SyncBlock { @@ -27,6 +27,12 @@ struct SyncBlock { body: Option, } +impl HeapSizeOf for SyncBlock { + fn heap_size_of_children(&self) -> usize { + self.header.heap_size_of_children() + self.body.heap_size_of_children() + } +} + /// Used to identify header by transactions and uncles hashes #[derive(Eq, PartialEq, Hash)] struct HeaderId { @@ -178,8 +184,8 @@ impl BlockCollection { { let mut blocks = Vec::new(); let mut head = self.head; - while head.is_some() { - head = self.parents.get(&head.unwrap()).cloned(); + while let Some(h) = head { + head = self.parents.get(&h).cloned(); if let Some(head) = head { match self.blocks.get(&head) { Some(block) if block.body.is_some() => { @@ -195,7 +201,7 @@ impl BlockCollection { for block in blocks.drain(..) 
{ let mut block_rlp = RlpStream::new_list(3); block_rlp.append_raw(&block.header, 1); - let body = Rlp::new(block.body.as_ref().unwrap()); // incomplete blocks are filtered out in the loop above + let body = Rlp::new(block.body.as_ref().expect("blocks contains only full blocks; qed")); block_rlp.append_raw(body.at(0).as_raw(), 1); block_rlp.append_raw(body.at(1).as_raw(), 1); drained.push(block_rlp.out()); @@ -219,10 +225,14 @@ impl BlockCollection { self.blocks.contains_key(hash) } - /// Return heap size. + /// Return used heap size. pub fn heap_size(&self) -> usize { - //TODO: other collections - self.blocks.heap_size_of_children() + self.heads.heap_size_of_children() + + self.blocks.heap_size_of_children() + + self.parents.heap_size_of_children() + + self.header_ids.heap_size_of_children() + + self.downloading_headers.heap_size_of_children() + + self.downloading_bodies.heap_size_of_children() } /// Check if given block hash is marked as being downloaded. diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 565c53827..446fd5499 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -90,7 +90,6 @@ use util::*; use rlp::*; use network::*; -use std::mem::{replace}; use ethcore::views::{HeaderView, BlockView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockID, BlockChainInfo, BlockImportError}; @@ -123,6 +122,7 @@ const MAX_ROUND_PARENTS: usize = 32; const MAX_NEW_HASHES: usize = 64; const MAX_TX_TO_IMPORT: usize = 512; const MAX_NEW_BLOCK_AGE: BlockNumber = 20; +const MAX_TRANSACTION_SIZE: usize = 300*1024; const STATUS_PACKET: u8 = 0x00; const NEW_BLOCK_HASHES_PACKET: u8 = 0x01; @@ -143,7 +143,7 @@ const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13; const SNAPSHOT_DATA_PACKET: u8 = 0x14; const HEADERS_TIMEOUT_SEC: f64 = 15f64; -const BODIES_TIMEOUT_SEC: f64 = 5f64; +const BODIES_TIMEOUT_SEC: f64 = 10f64; const FORK_HEADER_TIMEOUT_SEC: f64 = 3f64; const SNAPSHOT_MANIFEST_TIMEOUT_SEC: f64 = 3f64; 
const SNAPSHOT_DATA_TIMEOUT_SEC: f64 = 10f64; @@ -249,8 +249,6 @@ struct PeerInfo { network_id: U256, /// Peer best block hash latest_hash: H256, - /// Peer best block number if known - latest_number: Option, /// Peer total difficulty if known difficulty: Option, /// Type of data currenty being requested from peer. @@ -395,6 +393,8 @@ impl ChainSync { } self.syncing_difficulty = From::from(0u64); self.state = SyncState::Idle; + // Reactivate peers only if some progress has been made + // since the last sync round or if starting fresh. self.active_peers = self.peers.keys().cloned().collect(); } @@ -406,7 +406,8 @@ impl ChainSync { self.continue_sync(io); } - /// Remove peer from active peer set + /// Remove peer from active peer set. Peer will be reactivated on the next sync + /// round. fn deactivate_peer(&mut self, io: &mut SyncIo, peer_id: PeerId) { trace!(target: "sync", "Deactivating peer {}", peer_id); self.active_peers.remove(&peer_id); @@ -443,7 +444,6 @@ impl ChainSync { network_id: try!(r.val_at(1)), difficulty: Some(try!(r.val_at(2))), latest_hash: try!(r.val_at(3)), - latest_number: None, genesis: try!(r.val_at(4)), asking: PeerAsking::Nothing, asking_blocks: Vec::new(), @@ -480,7 +480,11 @@ impl ChainSync { } self.peers.insert(peer_id.clone(), peer); - self.active_peers.insert(peer_id.clone()); + // Don't activate peer immediately when searching for common block. + // Let the current sync round complete first.
+ if self.state != SyncState::ChainHead { + self.active_peers.insert(peer_id.clone()); + } debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id)); if let Some((fork_block, _)) = self.fork_block { self.request_headers_by_number(io, peer_id, fork_block, 1, 0, false, PeerAsking::ForkHeader); @@ -496,7 +500,8 @@ impl ChainSync { let confirmed = match self.peers.get_mut(&peer_id) { Some(ref mut peer) if peer.asking == PeerAsking::ForkHeader => { let item_count = r.item_count(); - if item_count == 0 || (item_count == 1 && try!(r.at(0)).as_raw().sha3() == self.fork_block.unwrap().1) { + if item_count == 0 || (item_count == 1 && + try!(r.at(0)).as_raw().sha3() == self.fork_block.expect("ForkHeader state is only entered when fork_block is some; qed").1) { peer.asking = PeerAsking::Nothing; if item_count == 0 { trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id); @@ -562,7 +567,7 @@ impl ChainSync { continue; } - if self.highest_block == None || number > self.highest_block.unwrap() { + if self.highest_block.as_ref().map_or(true, |n| number > *n) { self.highest_block = Some(number); } let hash = info.hash(); @@ -594,9 +599,9 @@ impl ChainSync { } if headers.is_empty() { - // Peer does not have any new subchain heads, deactivate it nd try with another + // Peer does not have any new subchain heads, deactivate it and try with another. 
trace!(target: "sync", "{} Disabled for no data", peer_id); - io.disable_peer(peer_id); + self.deactivate_peer(io, peer_id); } match self.state { SyncState::ChainHead => { @@ -675,9 +680,9 @@ impl ChainSync { } let mut unknown = false; { - let peer = self.peers.get_mut(&peer_id).unwrap(); - peer.latest_hash = header.hash(); - peer.latest_number = Some(header.number()); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.latest_hash = header.hash(); + } } if self.last_imported_block > header.number() && self.last_imported_block - header.number() > MAX_NEW_BLOCK_AGE { trace!(target: "sync", "Ignored ancient new block {:?}", h); @@ -770,9 +775,9 @@ impl ChainSync { new_hashes.push(hash.clone()); if number > max_height { trace!(target: "sync", "New unknown block hash {:?}", hash); - let peer = self.peers.get_mut(&peer_id).unwrap(); - peer.latest_hash = hash.clone(); - peer.latest_number = Some(number); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.latest_hash = hash.clone(); + } max_height = number; } }, @@ -942,19 +947,22 @@ impl ChainSync { return; } let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { - let peer = self.peers.get_mut(&peer_id).unwrap(); - if peer.asking != PeerAsking::Nothing || !peer.can_sync() { + if let Some(ref peer) = self.peers.get_mut(&peer_id) { + if peer.asking != PeerAsking::Nothing || !peer.can_sync() { + return; + } + if self.state == SyncState::Waiting { + trace!(target: "sync", "Waiting for the block queue"); + return; + } + if self.state == SyncState::SnapshotWaiting { + trace!(target: "sync", "Waiting for the snapshot restoration"); + return; + } + (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned(), peer.snapshot_hash.as_ref().cloned()) + } else { return; } - if self.state == SyncState::Waiting { - trace!(target: "sync", "Waiting for the block queue"); - return; - } - if self.state == SyncState::SnapshotWaiting { - trace!(target: 
"sync", "Waiting for the snapshot restoration"); - return; - } - (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned(), peer.snapshot_hash.as_ref().cloned()) }; let chain_info = io.chain().chain_info(); let td = chain_info.pending_total_difficulty; @@ -1042,14 +1050,18 @@ impl ChainSync { // check to see if we need to download any block bodies first let needed_bodies = self.blocks.needed_bodies(MAX_BODIES_TO_REQUEST, ignore_others); if !needed_bodies.is_empty() { - replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, needed_bodies.clone()); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.asking_blocks = needed_bodies.clone(); + } self.request_bodies(io, peer_id, needed_bodies); return; } // find subchain to download if let Some((h, count)) = self.blocks.needed_headers(MAX_HEADERS_TO_REQUEST, ignore_others) { - replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, vec![h.clone()]); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.asking_blocks = vec![h.clone()]; + } self.request_headers_by_hash(io, peer_id, &h, count, 0, false, PeerAsking::BlockHeaders); } } @@ -1059,34 +1071,37 @@ impl ChainSync { self.clear_peer_download(peer_id); // find chunk data to download if let Some(hash) = self.snapshot.needed_chunk() { - self.peers.get_mut(&peer_id).unwrap().asking_snapshot_data = Some(hash.clone()); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.asking_snapshot_data = Some(hash.clone()); + } self.request_snapshot_chunk(io, peer_id, &hash); } } /// Clear all blocks/headers marked as being downloaded by a peer. 
fn clear_peer_download(&mut self, peer_id: PeerId) { - let peer = self.peers.get_mut(&peer_id).unwrap(); - match peer.asking { - PeerAsking::BlockHeaders | PeerAsking::Heads => { - for b in &peer.asking_blocks { - self.blocks.clear_header_download(b); - } - }, - PeerAsking::BlockBodies => { - for b in &peer.asking_blocks { - self.blocks.clear_body_download(b); - } - }, - PeerAsking::SnapshotData => { - if let Some(hash) = peer.asking_snapshot_data { - self.snapshot.clear_chunk_download(&hash); - } - }, - _ => (), + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + match peer.asking { + PeerAsking::BlockHeaders | PeerAsking::Heads => { + for b in &peer.asking_blocks { + self.blocks.clear_header_download(b); + } + }, + PeerAsking::BlockBodies => { + for b in &peer.asking_blocks { + self.blocks.clear_body_download(b); + } + }, + PeerAsking::SnapshotData => { + if let Some(hash) = peer.asking_snapshot_data { + self.snapshot.clear_chunk_download(&hash); + } + }, + _ => (), + } + peer.asking_blocks.clear(); + peer.asking_snapshot_data = None; } - peer.asking_blocks.clear(); - peer.asking_snapshot_data = None; } fn block_imported(&mut self, hash: &H256, number: BlockNumber, parent: &H256) { @@ -1211,30 +1226,34 @@ impl ChainSync { /// Reset peer status after request is complete. 
fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) -> bool { - let peer = self.peers.get_mut(&peer_id).unwrap(); - peer.expired = false; - if peer.asking != asking { - trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); - peer.asking = PeerAsking::Nothing; + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.expired = false; + if peer.asking != asking { + trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); + peer.asking = PeerAsking::Nothing; + false + } + else { + peer.asking = PeerAsking::Nothing; + true + } + } else { false } - else { - peer.asking = PeerAsking::Nothing; - true - } } /// Generic request sender fn send_request(&mut self, sync: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) { - let peer = self.peers.get_mut(&peer_id).unwrap(); - if peer.asking != PeerAsking::Nothing { - warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking); - } - peer.asking = asking; - peer.ask_time = time::precise_time_s(); - if let Err(e) = sync.send(peer_id, packet_id, packet) { - debug!(target:"sync", "Error sending request: {:?}", e); - sync.disable_peer(peer_id); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + if peer.asking != PeerAsking::Nothing { + warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking); + } + peer.asking = asking; + peer.ask_time = time::precise_time_s(); + if let Err(e) = sync.send(peer_id, packet_id, packet) { + debug!(target:"sync", "Error sending request: {:?}", e); + sync.disable_peer(peer_id); + } } } @@ -1261,7 +1280,12 @@ impl ChainSync { item_count = min(item_count, MAX_TX_TO_IMPORT); let mut transactions = Vec::with_capacity(item_count); for i in 0 .. 
item_count { - let tx = try!(r.at(i)).as_raw().to_vec(); + let rlp = try!(r.at(i)); + if rlp.as_raw().len() > MAX_TRANSACTION_SIZE { + debug!("Skipped oversized transaction of {} bytes", rlp.as_raw().len()); + continue; + } + let tx = rlp.as_raw().to_vec(); transactions.push(tx); } io.chain().queue_transactions(transactions); @@ -1604,7 +1628,7 @@ impl ChainSync { /// creates latest block rlp for the given client fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes { let mut rlp_stream = RlpStream::new_list(2); - rlp_stream.append_raw(&chain.block(BlockID::Hash(chain.chain_info().best_block_hash)).unwrap(), 1); + rlp_stream.append_raw(&chain.block(BlockID::Hash(chain.chain_info().best_block_hash)).expect("Best block always exists"), 1); rlp_stream.append(&chain.chain_info().total_difficulty); rlp_stream.out() } @@ -1618,25 +1642,23 @@ impl ChainSync { } /// returns peer ids that have less blocks than our chain - fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec<(PeerId, BlockNumber)> { + fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec { let latest_hash = chain_info.best_block_hash; - let latest_number = chain_info.best_block_number; self.peers.iter_mut().filter_map(|(&id, ref mut peer_info)| match io.chain().block_status(BlockID::Hash(peer_info.latest_hash.clone())) { BlockStatus::InChain => { - if peer_info.latest_number.is_none() { - peer_info.latest_number = Some(HeaderView::new(&io.chain().block_header(BlockID::Hash(peer_info.latest_hash.clone())).unwrap()).number()); + if peer_info.latest_hash != latest_hash { + Some(id) + } else { + None } - if peer_info.latest_hash != latest_hash && latest_number > peer_info.latest_number.unwrap() { - Some((id, peer_info.latest_number.unwrap())) - } else { None } }, _ => None }) .collect::>() } - fn select_random_lagging_peers(&mut self, peers: &[(PeerId, BlockNumber)]) -> Vec<(PeerId, BlockNumber)> { + fn select_random_lagging_peers(&mut self, peers: 
&[PeerId]) -> Vec { use rand::Rng; // take sqrt(x) peers let mut peers = peers.to_vec(); @@ -1649,46 +1671,42 @@ impl ChainSync { } /// propagates latest block to lagging peers - fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[(PeerId, BlockNumber)]) -> usize { + fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[PeerId]) -> usize { trace!(target: "sync", "Sending NewBlocks to {:?}", peers); let mut sent = 0; - for &(peer_id, _) in peers { + for peer_id in peers { if sealed.is_empty() { let rlp = ChainSync::create_latest_block_rlp(io.chain()); - self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp); + self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp); } else { for h in sealed { let rlp = ChainSync::create_new_block_rlp(io.chain(), h); - self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp); + self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp); } } - self.peers.get_mut(&peer_id).unwrap().latest_hash = chain_info.best_block_hash.clone(); - self.peers.get_mut(&peer_id).unwrap().latest_number = Some(chain_info.best_block_number); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.latest_hash = chain_info.best_block_hash.clone(); + } sent += 1; } sent } /// propagates new known hashes to all peers - fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[(PeerId, BlockNumber)]) -> usize { + fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize { trace!(target: "sync", "Sending NewHashes to {:?}", peers); let mut sent = 0; - let last_parent = HeaderView::new(&io.chain().block_header(BlockID::Hash(chain_info.best_block_hash.clone())).unwrap()).parent_hash(); - for &(peer_id, peer_number) in peers { - let peer_best = if chain_info.best_block_number - peer_number > MAX_PEER_LAG_PROPAGATION as BlockNumber { - // If we think peer is too far behind just send one 
latest hash - last_parent.clone() - } else { - self.peers.get(&peer_id).unwrap().latest_hash.clone() - }; - sent += match ChainSync::create_new_hashes_rlp(io.chain(), &peer_best, &chain_info.best_block_hash) { + let last_parent = HeaderView::new(&io.chain().block_header(BlockID::Hash(chain_info.best_block_hash.clone())) + .expect("Best block always exists")).parent_hash(); + for peer_id in peers { + sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) { Some(rlp) => { { - let peer = self.peers.get_mut(&peer_id).unwrap(); - peer.latest_hash = chain_info.best_block_hash.clone(); - peer.latest_number = Some(chain_info.best_block_number); + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.latest_hash = chain_info.best_block_hash.clone(); + } } - self.send_packet(io, peer_id, NEW_BLOCK_HASHES_PACKET, rlp); + self.send_packet(io, *peer_id, NEW_BLOCK_HASHES_PACKET, rlp); 1 }, None => 0 @@ -2001,7 +2019,6 @@ mod tests { genesis: H256::zero(), network_id: U256::zero(), latest_hash: peer_latest_hash, - latest_number: None, difficulty: None, asking: PeerAsking::Nothing, asking_blocks: Vec::new(), diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d2c6e2583..af566772e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -63,3 +63,9 @@ pub use api::{EthSync, SyncProvider, SyncClient, NetworkManagerClient, ManageNet ServiceConfiguration, NetworkConfiguration}; pub use chain::{SyncStatus, SyncState}; pub use network::{is_valid_node_url, NonReservedPeerMode, NetworkError}; + +/// IPC interfaces +#[cfg(feature="ipc")] +pub mod remote { + pub use api::{SyncClient, NetworkManagerClient}; +} diff --git a/sync/src/sync_io.rs b/sync/src/sync_io.rs index fa95941ea..445939399 100644 --- a/sync/src/sync_io.rs +++ b/sync/src/sync_io.rs @@ -17,7 +17,6 @@ use network::{NetworkContext, PeerId, PacketId, NetworkError}; use ethcore::client::BlockChainClient; use ethcore::snapshot::SnapshotService; -use api::ETH_PROTOCOL; /// IO 
interface for the syning handler. /// Provides peer connection management and an interface to the blockchain client. @@ -101,7 +100,7 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { } fn eth_protocol_version(&self, peer_id: PeerId) -> u8 { - self.network.protocol_version(peer_id, ETH_PROTOCOL).unwrap_or(0) + self.network.protocol_version(peer_id, self.network.subprotocol_name()).unwrap_or(0) } } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index d8d3d0711..c54529beb 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -95,6 +95,27 @@ fn forked() { assert_eq!(&*net.peer(2).chain.numbers.read(), &peer1_chain); } +#[test] +fn forked_with_misbehaving_peer() { + ::env_logger::init().ok(); + let mut net = TestNet::new(3); + // peer 0 is on a totally different chain with higher total difficulty + net.peer_mut(0).chain = TestBlockChainClient::new_with_extra_data(b"fork".to_vec()); + net.peer_mut(0).chain.add_blocks(500, EachBlockWith::Nothing); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Nothing); + net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Nothing); + + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Nothing); + net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle); + // peer 1 should sync to peer 2, others should not change + let peer0_chain = net.peer(0).chain.numbers.read().clone(); + let peer2_chain = net.peer(2).chain.numbers.read().clone(); + net.sync(); + assert_eq!(&*net.peer(0).chain.numbers.read(), &peer0_chain); + assert_eq!(&*net.peer(1).chain.numbers.read(), &peer2_chain); + assert_eq!(&*net.peer(2).chain.numbers.read(), &peer2_chain); +} + #[test] fn net_hard_fork() { ::env_logger::init().ok(); @@ -116,11 +137,12 @@ fn net_hard_fork() { #[test] fn restart() { + ::env_logger::init().ok(); let mut net = TestNet::new(3); net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); - net.sync_steps(8); + net.sync(); // make sure that 
sync has actually happened assert!(net.peer(0).chain.chain_info().best_block_number > 100); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index cbed49eff..3558e5578 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -29,6 +29,7 @@ pub struct TestIo<'p> { pub snapshot_service: &'p TestSnapshotService, pub queue: &'p mut VecDeque, pub sender: Option, + pub to_disconnect: HashSet, } impl<'p> TestIo<'p> { @@ -37,16 +38,19 @@ impl<'p> TestIo<'p> { chain: chain, snapshot_service: ss, queue: queue, - sender: sender + sender: sender, + to_disconnect: HashSet::new(), } } } impl<'p> SyncIo for TestIo<'p> { - fn disable_peer(&mut self, _peer_id: PeerId) { + fn disable_peer(&mut self, peer_id: PeerId) { + self.disconnect_peer(peer_id); } - fn disconnect_peer(&mut self, _peer_id: PeerId) { + fn disconnect_peer(&mut self, peer_id: PeerId) { + self.to_disconnect.insert(peer_id); } fn is_expired(&self) -> bool { @@ -150,13 +154,30 @@ impl TestNet { pub fn sync_step(&mut self) { for peer in 0..self.peers.len() { if let Some(packet) = self.peers[peer].queue.pop_front() { - let mut p = self.peers.get_mut(packet.recipient).unwrap(); - trace!("--- {} -> {} ---", peer, packet.recipient); - ChainSync::dispatch_packet(&p.sync, &mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); - trace!("----------------"); + let disconnecting = { + let mut p = self.peers.get_mut(packet.recipient).unwrap(); + trace!("--- {} -> {} ---", peer, packet.recipient); + let to_disconnect = { + let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId)); + ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data); + io.to_disconnect + }; + for d in &to_disconnect { + // notify this that disconnecting peers are disconnecting + let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(*d)); + 
p.sync.write().on_peer_aborting(&mut io, *d); + } + to_disconnect + }; + for d in &disconnecting { + // notify other peers that this peer is disconnecting + let mut p = self.peers.get_mut(*d).unwrap(); + let mut io = TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, Some(peer as PeerId)); + p.sync.write().on_peer_aborting(&mut io, peer as PeerId); + } } - let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.write().maintain_sync(&mut TestIo::new(&mut p.chain, &p.snapshot_service, &mut p.queue, None)); + + self.sync_step_peer(peer); } } diff --git a/util/Cargo.toml b/util/Cargo.toml index 81916555c..520a4e003 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -34,6 +34,7 @@ using_queue = { path = "using_queue" } table = { path = "table" } ansi_term = "0.7" tiny-keccak= "1.0" +ethcore-bloom-journal = { path = "bloom" } [features] default = [] diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index ee25ce846..c21b5239f 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://ethcore.io" repository = "https://github.com/ethcore/parity" license = "GPL-3.0" name = "ethcore-bigint" -version = "0.1.0" +version = "0.1.1" authors = ["Ethcore "] build = "build.rs" diff --git a/util/bigint/src/hash.rs b/util/bigint/src/hash.rs index 97b9545bc..f782d1f90 100644 --- a/util/bigint/src/hash.rs +++ b/util/bigint/src/hash.rs @@ -64,11 +64,11 @@ pub fn clean_0x(s: &str) -> &str { macro_rules! impl_hash { ($from: ident, $size: expr) => { - #[derive(Eq)] #[repr(C)] /// Unformatted binary data of fixed length. pub struct $from (pub [u8; $size]); + impl From<[u8; $size]> for $from { fn from(bytes: [u8; $size]) -> Self { $from(bytes) @@ -210,6 +210,8 @@ macro_rules! 
impl_hash { } } + impl Eq for $from {} + impl PartialEq for $from { fn eq(&self, other: &Self) -> bool { for i in 0..$size { diff --git a/util/bloom/Cargo.toml b/util/bloom/Cargo.toml new file mode 100644 index 000000000..5397c691b --- /dev/null +++ b/util/bloom/Cargo.toml @@ -0,0 +1,9 @@ +[project] +name = "ethcore-bloom-journal" +version = "0.1.0" +authors = ["Ethcore"] +description = "Journaling bloom filter" +license = "GPL3" + +[lib] +path = "src/lib.rs" diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs new file mode 100644 index 000000000..582437651 --- /dev/null +++ b/util/bloom/src/lib.rs @@ -0,0 +1,247 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::cmp; +use std::mem; +use std::f64; +use std::hash::{Hash, Hasher, SipHasher}; +use std::collections::HashSet; + +/// BitVec structure with journalling +/// Every time any of the blocks is getting set it's index is tracked +/// and can be then drained by `drain` method +struct BitVecJournal { + elems: Vec, + journal: HashSet, +} + +impl BitVecJournal { + pub fn new(size: usize) -> BitVecJournal { + let extra = if size % 8 > 0 { 1 } else { 0 }; + BitVecJournal { + elems: vec![0u64; size / 8 + extra], + journal: HashSet::new(), + } + } + + pub fn from_parts(parts: &[u64]) -> BitVecJournal { + BitVecJournal { + elems: parts.to_vec(), + journal: HashSet::new(), + } + } + + pub fn set(&mut self, index: usize) { + let e_index = index / 64; + let bit_index = index % 64; + let val = self.elems.get_mut(e_index).unwrap(); + *val |= 1u64 << bit_index; + self.journal.insert(e_index); + } + + pub fn get(&self, index: usize) -> bool { + let e_index = index / 64; + let bit_index = index % 64; + self.elems[e_index] & (1 << bit_index) != 0 + } + + pub fn drain(&mut self) -> Vec<(usize, u64)> { + let journal = mem::replace(&mut self.journal, HashSet::new()).into_iter(); + journal.map(|idx| (idx, self.elems[idx])).collect::>() + } + + pub fn saturation(&self) -> f64 { + self.elems.iter().fold(0u64, |acc, e| acc + e.count_ones() as u64) as f64 / (self.elems.len() * 64) as f64 + } +} + +/// Bloom filter structure +pub struct Bloom { + bitmap: BitVecJournal, + bitmap_bits: u64, + k_num: u32, + sips: [SipHasher; 2], +} + +impl Bloom { + /// Create a new bloom filter structure. + /// bitmap_size is the size in bytes (not bits) that will be allocated in memory + /// items_count is an estimation of the maximum number of items to store. 
+ pub fn new(bitmap_size: usize, items_count: usize) -> Bloom { + assert!(bitmap_size > 0 && items_count > 0); + let bitmap_bits = (bitmap_size as u64) * 8u64; + let k_num = Bloom::optimal_k_num(bitmap_bits, items_count); + let bitmap = BitVecJournal::new(bitmap_bits as usize); + let sips = [Bloom::sip_new(), Bloom::sip_new()]; + Bloom { + bitmap: bitmap, + bitmap_bits: bitmap_bits, + k_num: k_num, + sips: sips, + } + } + + /// Initializes bloom filter from saved state + pub fn from_parts(parts: &[u64], k_num: u32) -> Bloom { + let bitmap_size = parts.len() * 8; + let bitmap_bits = (bitmap_size as u64) * 8u64; + let bitmap = BitVecJournal::from_parts(parts); + let sips = [Bloom::sip_new(), Bloom::sip_new()]; + Bloom { + bitmap: bitmap, + bitmap_bits: bitmap_bits, + k_num: k_num, + sips: sips, + } + } + + /// Create a new bloom filter structure. + /// items_count is an estimation of the maximum number of items to store. + /// fp_p is the wanted rate of false positives, in ]0.0, 1.0[ + pub fn new_for_fp_rate(items_count: usize, fp_p: f64) -> Bloom { + let bitmap_size = Bloom::compute_bitmap_size(items_count, fp_p); + Bloom::new(bitmap_size, items_count) + } + + /// Compute a recommended bitmap size for items_count items + /// and a fp_p rate of false positives. + /// fp_p obviously has to be within the ]0.0, 1.0[ range. + pub fn compute_bitmap_size(items_count: usize, fp_p: f64) -> usize { + assert!(items_count > 0); + assert!(fp_p > 0.0 && fp_p < 1.0); + let log2 = f64::consts::LN_2; + let log2_2 = log2 * log2; + ((items_count as f64) * f64::ln(fp_p) / (-8.0 * log2_2)).ceil() as usize + } + + /// Records the presence of an item. + pub fn set(&mut self, item: T) + where T: Hash + { + let mut hashes = [0u64, 0u64]; + for k_i in 0..self.k_num { + let bit_offset = (self.bloom_hash(&mut hashes, &item, k_i) % self.bitmap_bits) as usize; + self.bitmap.set(bit_offset); + } + } + + /// Check if an item is present in the set. 
+ /// There can be false positives, but no false negatives. + pub fn check(&self, item: T) -> bool + where T: Hash + { + let mut hashes = [0u64, 0u64]; + for k_i in 0..self.k_num { + let bit_offset = (self.bloom_hash(&mut hashes, &item, k_i) % self.bitmap_bits) as usize; + if !self.bitmap.get(bit_offset) { + return false; + } + } + true + } + + /// Return the number of bits in the filter + pub fn number_of_bits(&self) -> u64 { + self.bitmap_bits + } + + /// Return the number of hash functions used for `check` and `set` + pub fn number_of_hash_functions(&self) -> u32 { + self.k_num + } + + fn optimal_k_num(bitmap_bits: u64, items_count: usize) -> u32 { + let m = bitmap_bits as f64; + let n = items_count as f64; + let k_num = (m / n * f64::ln(2.0f64)).ceil() as u32; + cmp::max(k_num, 1) + } + + fn bloom_hash(&self, hashes: &mut [u64; 2], item: &T, k_i: u32) -> u64 + where T: Hash + { + if k_i < 2 { + let sip = &mut self.sips[k_i as usize].clone(); + item.hash(sip); + let hash = sip.finish(); + hashes[k_i as usize] = hash; + hash + } else { + hashes[0].wrapping_add((k_i as u64).wrapping_mul(hashes[1]) % 0xffffffffffffffc5) + } + } + + fn sip_new() -> SipHasher { + SipHasher::new() + } + + /// Drains the bloom journal returning the updated bloom part + pub fn drain_journal(&mut self) -> BloomJournal { + BloomJournal { + entries: self.bitmap.drain(), + hash_functions: self.k_num, + } + } + + /// Returns the ratio of set bits in the bloom filter to the total bits + pub fn saturation(&self) -> f64 { + self.bitmap.saturation() + } +} + +/// Bloom journal +/// Returns the tuple of (bloom part index, bloom part value) where each one is representing +/// an index of bloom parts that was updated since the last drain +pub struct BloomJournal { + pub hash_functions: u32, + pub entries: Vec<(usize, u64)>, +} + + +#[cfg(test)] +mod tests { + use super::Bloom; + + #[test] + fn get_set() { + let mut bloom = Bloom::new(10, 80); + let key = vec![115u8, 99]; + 
assert!(!bloom.check(&key)); + bloom.set(&key); + assert!(bloom.check(&key)); + } + + #[test] + fn journalling() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + let drain = bloom.drain_journal(); + + assert_eq!(2, drain.entries.len()) + } + + #[test] + fn saturation() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + + let full = bloom.saturation(); + // 2/8/64 = 0.00390625 + assert!(full >= 0.0039f64 && full <= 0.004f64); + } +} diff --git a/util/fetch/Cargo.toml b/util/fetch/Cargo.toml new file mode 100644 index 000000000..663d167bf --- /dev/null +++ b/util/fetch/Cargo.toml @@ -0,0 +1,18 @@ +[package] +description = "HTTP/HTTPS fetching library" +homepage = "http://ethcore.io" +license = "GPL-3.0" +name = "fetch" +version = "0.1.0" +authors = ["Ethcore "] + +[dependencies] +log = "0.3" +rand = "0.3" +hyper = { default-features = false, git = "https://github.com/ethcore/hyper" } +https-fetch = { path = "../https-fetch" } +clippy = { version = "0.0.90", optional = true} + +[features] +default = [] +dev = ["clippy"] diff --git a/util/fetch/src/client.rs b/util/fetch/src/client.rs new file mode 100644 index 000000000..bb8842a5b --- /dev/null +++ b/util/fetch/src/client.rs @@ -0,0 +1,146 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Fetching + +use std::{env, io}; +use std::sync::{mpsc, Arc}; +use std::sync::atomic::AtomicBool; +use std::path::PathBuf; + +use hyper; +use https_fetch as https; + +use fetch_file::{FetchHandler, Error as HttpFetchError}; + +pub type FetchResult = Result; + +#[derive(Debug)] +pub enum FetchError { + InvalidUrl, + Http(HttpFetchError), + Https(https::FetchError), + Io(io::Error), + Other(String), +} + +impl From for FetchError { + fn from(e: HttpFetchError) -> Self { + FetchError::Http(e) + } +} + +impl From for FetchError { + fn from(e: io::Error) -> Self { + FetchError::Io(e) + } +} + +pub trait Fetch: Default + Send { + /// Fetch URL and get the result in callback. + fn request_async(&mut self, url: &str, abort: Arc, on_done: Box) -> Result<(), FetchError>; + + /// Fetch URL and get a result Receiver. You will be notified when receiver is ready by `on_done` callback. + fn request(&mut self, url: &str, abort: Arc, on_done: Box) -> Result, FetchError> { + let (tx, rx) = mpsc::channel(); + try!(self.request_async(url, abort, Box::new(move |result| { + let res = tx.send(result); + if let Err(_) = res { + warn!("Fetch finished, but no one was listening"); + } + on_done(); + }))); + Ok(rx) + } + + /// Closes this client + fn close(self) {} + + /// Returns a random filename + fn random_filename() -> String { + use ::rand::Rng; + let mut rng = ::rand::OsRng::new().unwrap(); + rng.gen_ascii_chars().take(12).collect() + } +} + +pub struct Client { + http_client: hyper::Client, + https_client: https::Client, + limit: Option, +} + +impl Default for Client { + fn default() -> Self { + // Max 15MB will be downloaded. 
+ Client::with_limit(Some(15*1024*1024)) + } +} + +impl Client { + fn with_limit(limit: Option) -> Self { + Client { + http_client: hyper::Client::new().expect("Unable to initialize http client."), + https_client: https::Client::with_limit(limit).expect("Unable to initialize https client."), + limit: limit, + } + } + + fn convert_url(url: hyper::Url) -> Result { + let host = format!("{}", try!(url.host().ok_or(FetchError::InvalidUrl))); + let port = try!(url.port_or_known_default().ok_or(FetchError::InvalidUrl)); + https::Url::new(&host, port, url.path()).map_err(|_| FetchError::InvalidUrl) + } + + fn temp_path() -> PathBuf { + let mut dir = env::temp_dir(); + dir.push(Self::random_filename()); + dir + } +} + +impl Fetch for Client { + fn close(self) { + self.http_client.close(); + self.https_client.close(); + } + + fn request_async(&mut self, url: &str, abort: Arc, on_done: Box) -> Result<(), FetchError> { + let is_https = url.starts_with("https://"); + let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl)); + let temp_path = Self::temp_path(); + + trace!(target: "fetch", "Fetching from: {:?}", url); + + if is_https { + let url = try!(Self::convert_url(url)); + try!(self.https_client.fetch_to_file( + url, + temp_path.clone(), + abort, + move |result| on_done(result.map(|_| temp_path).map_err(FetchError::Https)), + ).map_err(|e| FetchError::Other(format!("{:?}", e)))); + } else { + try!(self.http_client.request( + url, + FetchHandler::new(temp_path, abort, Box::new(move |result| on_done(result)), self.limit.map(|v| v as u64).clone()), + ).map_err(|e| FetchError::Other(format!("{:?}", e)))); + } + + Ok(()) + } +} + diff --git a/dapps/src/handlers/client/fetch_file.rs b/util/fetch/src/fetch_file.rs similarity index 82% rename from dapps/src/handlers/client/fetch_file.rs rename to util/fetch/src/fetch_file.rs index c18fb6d5b..4801cc969 100644 --- a/dapps/src/handlers/client/fetch_file.rs +++ b/util/fetch/src/fetch_file.rs @@ -16,12 +16,11 @@ //! 
Hyper Client Handler to Fetch File -use std::{env, io, fs, fmt}; +use std::{io, fs, fmt}; use std::path::PathBuf; -use std::sync::{mpsc, Arc}; +use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; -use random_filename; use hyper::status::StatusCode; use hyper::client::{Request, Response, DefaultTransport as HttpStream}; @@ -34,30 +33,31 @@ use super::FetchError; pub enum Error { Aborted, NotStarted, + SizeLimit, UnexpectedStatus(StatusCode), IoError(io::Error), HyperError(hyper::Error), } pub type FetchResult = Result; -pub type OnDone = Box; +pub type OnDone = Box; -pub struct Fetch { +pub struct FetchHandler { path: PathBuf, abort: Arc, file: Option, result: Option, - sender: mpsc::Sender, on_done: Option, + size_limit: Option, } -impl fmt::Debug for Fetch { +impl fmt::Debug for FetchHandler { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "Fetch {{ path: {:?}, file: {:?}, result: {:?} }}", self.path, self.file, self.result) } } -impl Drop for Fetch { +impl Drop for FetchHandler { fn drop(&mut self) { let res = self.result.take().unwrap_or(Err(Error::NotStarted.into())); // Remove file if there was an error @@ -69,40 +69,35 @@ impl Drop for Fetch { } } // send result - let _ = self.sender.send(res); if let Some(f) = self.on_done.take() { - f(); + f(res); } } } -impl Fetch { - pub fn new(sender: mpsc::Sender, abort: Arc, on_done: OnDone) -> Self { - let mut dir = env::temp_dir(); - dir.push(random_filename()); - - Fetch { - path: dir, +impl FetchHandler { + pub fn new(path: PathBuf, abort: Arc, on_done: OnDone, size_limit: Option) -> Self { + FetchHandler { + path: path, abort: abort, file: None, result: None, - sender: sender, on_done: Some(on_done), + size_limit: size_limit, } } -} -impl Fetch { fn is_aborted(&self) -> bool { self.abort.load(Ordering::SeqCst) } + fn mark_aborted(&mut self) -> Next { self.result = Some(Err(Error::Aborted.into())); Next::end() } } -impl hyper::client::Handler for 
Fetch { +impl hyper::client::Handler for FetchHandler { fn on_request(&mut self, req: &mut Request) -> Next { if self.is_aborted() { return self.mark_aborted(); @@ -147,7 +142,19 @@ impl hyper::client::Handler for Fetch { } match io::copy(decoder, self.file.as_mut().expect("File is there because on_response has created it.")) { Ok(0) => Next::end(), - Ok(_) => read(), + Ok(bytes_read) => match self.size_limit { + None => read(), + // Check limit + Some(limit) if limit > bytes_read => { + self.size_limit = Some(limit - bytes_read); + read() + }, + // Size limit reached + _ => { + self.result = Some(Err(Error::SizeLimit.into())); + Next::end() + }, + }, Err(e) => match e.kind() { io::ErrorKind::WouldBlock => Next::read(), _ => { diff --git a/ethcore/src/types/block_queue_info.rs b/util/fetch/src/lib.rs similarity index 55% rename from ethcore/src/types/block_queue_info.rs rename to util/fetch/src/lib.rs index d299258ce..8ec9e0ddd 100644 --- a/ethcore/src/types/block_queue_info.rs +++ b/util/fetch/src/lib.rs @@ -14,21 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Block queue info types +//! A service to fetch any HTTP / HTTPS content. 
-/// Block queue status -#[derive(Debug, Binary)] -pub struct BlockQueueInfo { - /// Number of queued blocks pending verification - pub unverified_queue_size: usize, - /// Number of verified queued blocks pending import - pub verified_queue_size: usize, - /// Number of blocks being verified - pub verifying_queue_size: usize, - /// Configured maximum number of blocks in the queue - pub max_queue_size: usize, - /// Configured maximum number of bytes to use - pub max_mem_use: usize, - /// Heap memory used in bytes - pub mem_used: usize, -} +#[macro_use] +extern crate log; +extern crate hyper; +extern crate https_fetch; +extern crate rand; + + +pub mod client; +pub mod fetch_file; + +pub use self::client::{Client, Fetch, FetchError, FetchResult}; diff --git a/util/https-fetch/src/client.rs b/util/https-fetch/src/client.rs index 3e5d50515..ad75f2ca4 100644 --- a/util/https-fetch/src/client.rs +++ b/util/https-fetch/src/client.rs @@ -78,6 +78,10 @@ impl Drop for Client { impl Client { pub fn new() -> Result { + Self::with_limit(None) + } + + pub fn with_limit(size_limit: Option) -> Result { let mut event_loop = try!(mio::EventLoop::new()); let channel = event_loop.channel(); @@ -85,6 +89,7 @@ impl Client { let mut client = ClientLoop { next_token: 0, sessions: HashMap::new(), + size_limit: size_limit, }; event_loop.run(&mut client).unwrap(); }); @@ -128,6 +133,7 @@ impl Client { pub struct ClientLoop { next_token: usize, sessions: HashMap, + size_limit: Option, } impl mio::Handler for ClientLoop { @@ -154,7 +160,7 @@ impl mio::Handler for ClientLoop { let token = self.next_token; self.next_token += 1; - if let Ok(mut tlsclient) = TlsClient::new(mio::Token(token), &url, writer, abort, callback) { + if let Ok(mut tlsclient) = TlsClient::new(mio::Token(token), &url, writer, abort, callback, self.size_limit.clone()) { let httpreq = format!( "GET {} HTTP/1.1\r\nHost: {}\r\nConnection: close\r\nAccept-Encoding: identity\r\n\r\n", url.path(), diff --git 
a/util/https-fetch/src/http.rs b/util/https-fetch/src/http.rs index d29974c01..5f40ca4cb 100644 --- a/util/https-fetch/src/http.rs +++ b/util/https-fetch/src/http.rs @@ -35,18 +35,20 @@ pub struct HttpProcessor { status: Option, headers: Vec, body_writer: io::BufWriter>, + size_limit: Option, } const BREAK_LEN: usize = 2; impl HttpProcessor { - pub fn new(body_writer: Box) -> Self { + pub fn new(body_writer: Box, size_limit: Option) -> Self { HttpProcessor { state: State::WaitingForStatus, buffer: Cursor::new(Vec::new()), status: None, headers: Vec::new(), - body_writer: io::BufWriter::new(body_writer) + body_writer: io::BufWriter::new(body_writer), + size_limit: size_limit, } } @@ -140,6 +142,15 @@ impl HttpProcessor { }, State::WritingBody => { let len = self.buffer.get_ref().len(); + match self.size_limit { + None => {}, + Some(limit) if limit > len => {}, + _ => { + warn!("Finishing file fetching because limit was reached."); + self.set_state(State::Finished); + continue; + } + } try!(self.body_writer.write_all(self.buffer.get_ref())); self.buffer_consume(len); return Ok(()); @@ -167,6 +178,17 @@ impl HttpProcessor { }, // Buffers the data until we have a full chunk State::WritingChunk(left) if self.buffer.get_ref().len() >= left => { + match self.size_limit { + None => {}, + Some(limit) if limit > left => { + self.size_limit = Some(limit - left); + }, + _ => { + warn!("Finishing file fetching because limit was reached."); + self.set_state(State::Finished); + continue; + } + } try!(self.body_writer.write_all(&self.buffer.get_ref()[0..left])); self.buffer_consume(left + BREAK_LEN); @@ -230,7 +252,7 @@ mod tests { #[test] fn should_be_able_to_process_status_line() { // given - let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new()))); + let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new())), None); // when let out = @@ -249,7 +271,7 @@ mod tests { #[test] fn should_be_able_to_process_headers() { // given - let mut http = 
HttpProcessor::new(Box::new(Cursor::new(Vec::new()))); + let mut http = HttpProcessor::new(Box::new(Cursor::new(Vec::new())), None); // when let out = @@ -274,7 +296,7 @@ mod tests { fn should_be_able_to_consume_body() { // given let (writer, data) = Writer::new(); - let mut http = HttpProcessor::new(Box::new(writer)); + let mut http = HttpProcessor::new(Box::new(writer), None); // when let out = @@ -301,7 +323,7 @@ mod tests { fn should_correctly_handle_chunked_content() { // given let (writer, data) = Writer::new(); - let mut http = HttpProcessor::new(Box::new(writer)); + let mut http = HttpProcessor::new(Box::new(writer), None); // when let out = @@ -331,4 +353,40 @@ mod tests { assert_eq!(data.borrow().get_ref()[..], b"Parity in\r\n\r\nchunks."[..]); assert_eq!(http.state(), State::Finished); } + + #[test] + fn should_stop_fetching_when_limit_is_reached() { + // given + let (writer, data) = Writer::new(); + let mut http = HttpProcessor::new(Box::new(writer), Some(5)); + + // when + let out = + "\ + HTTP/1.1 200 OK\r\n\ + Host: 127.0.0.1:8080\r\n\ + Transfer-Encoding: chunked\r\n\ + Connection: close\r\n\ + \r\n\ + 4\r\n\ + Pari\r\n\ + 3\r\n\ + ty \r\n\ + D\r\n\ + in\r\n\ + \r\n\ + chunks.\r\n\ + 0\r\n\ + \r\n\ + "; + http.write_all(out.as_bytes()).unwrap(); + http.flush().unwrap(); + + // then + assert_eq!(http.status().unwrap(), "HTTP/1.1 200 OK"); + assert_eq!(http.headers().len(), 3); + assert_eq!(data.borrow().get_ref()[..], b"Pari"[..]); + assert_eq!(http.state(), State::Finished); + } + } diff --git a/util/https-fetch/src/tlsclient.rs b/util/https-fetch/src/tlsclient.rs index e3ce44764..62af2b06c 100644 --- a/util/https-fetch/src/tlsclient.rs +++ b/util/https-fetch/src/tlsclient.rs @@ -87,6 +87,7 @@ impl TlsClient { writer: Box, abort: Arc, mut callback: Box, + size_limit: Option, ) -> Result { let res = TlsClient::make_config().and_then(|cfg| { TcpStream::connect(url.address()).map(|sock| { @@ -98,7 +99,7 @@ impl TlsClient { Ok((cfg, sock)) => 
Ok(TlsClient { abort: abort, token: token, - writer: HttpProcessor::new(writer), + writer: HttpProcessor::new(writer, size_limit), socket: sock, closing: false, error: None, diff --git a/util/io/src/lib.rs b/util/io/src/lib.rs index b2a16e19b..082192dfa 100644 --- a/util/io/src/lib.rs +++ b/util/io/src/lib.rs @@ -68,6 +68,8 @@ mod panics; use mio::{EventLoop, Token}; use std::fmt; +pub use worker::LOCAL_STACK_SIZE; + #[derive(Debug)] /// IO Error pub enum IoError { diff --git a/util/io/src/worker.rs b/util/io/src/worker.rs index 0176c467c..f4f63919f 100644 --- a/util/io/src/worker.rs +++ b/util/io/src/worker.rs @@ -22,9 +22,19 @@ use crossbeam::sync::chase_lev; use service::{HandlerId, IoChannel, IoContext}; use IoHandler; use panics::*; +use std::cell::Cell; use std::sync::{Condvar as SCondvar, Mutex as SMutex}; +const STACK_SIZE: usize = 16*1024*1024; + +thread_local! { + /// Stack size + /// Should be modified if it is changed in Rust since it is no way + /// to know or get it + pub static LOCAL_STACK_SIZE: Cell = Cell::new(::std::env::var("RUST_MIN_STACK").ok().and_then(|s| s.parse().ok()).unwrap_or(2 * 1024 * 1024)); +} + pub enum WorkType { Readable, Writable, @@ -66,8 +76,9 @@ impl Worker { deleting: deleting.clone(), wait_mutex: wait_mutex.clone(), }; - worker.thread = Some(thread::Builder::new().name(format!("IO Worker #{}", index)).spawn( + worker.thread = Some(thread::Builder::new().stack_size(STACK_SIZE).name(format!("IO Worker #{}", index)).spawn( move || { + LOCAL_STACK_SIZE.with(|val| val.set(STACK_SIZE)); panic_handler.catch_panic(move || { Worker::work_loop(stealer, channel.clone(), wait, wait_mutex.clone(), deleting) }).unwrap() diff --git a/util/network/src/host.rs b/util/network/src/host.rs index ebc10324f..a0d0a081a 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -47,7 +47,24 @@ type Slab = ::slab::Slab; const MAX_SESSIONS: usize = 1024 + MAX_HANDSHAKES; const MAX_HANDSHAKES: usize = 80; const MAX_HANDSHAKES_PER_ROUND: 
usize = 32; + +// Tokens +const TCP_ACCEPT: usize = SYS_TIMER + 1; +const IDLE: usize = SYS_TIMER + 2; +const DISCOVERY: usize = SYS_TIMER + 3; +const DISCOVERY_REFRESH: usize = SYS_TIMER + 4; +const DISCOVERY_ROUND: usize = SYS_TIMER + 5; +const NODE_TABLE: usize = SYS_TIMER + 6; +const FIRST_SESSION: usize = 0; +const LAST_SESSION: usize = FIRST_SESSION + MAX_SESSIONS - 1; +const USER_TIMER: usize = LAST_SESSION + 256; +const SYS_TIMER: usize = LAST_SESSION + 1; + +// Timeouts const MAINTENANCE_TIMEOUT: u64 = 1000; +const DISCOVERY_REFRESH_TIMEOUT: u64 = 7200; +const DISCOVERY_ROUND_TIMEOUT: u64 = 300; +const NODE_TABLE_TIMEOUT: u64 = 300_000; #[derive(Debug, PartialEq, Clone)] /// Network service configuration @@ -122,22 +139,10 @@ impl NetworkConfiguration { } } -// Tokens -const TCP_ACCEPT: usize = SYS_TIMER + 1; -const IDLE: usize = SYS_TIMER + 2; -const DISCOVERY: usize = SYS_TIMER + 3; -const DISCOVERY_REFRESH: usize = SYS_TIMER + 4; -const DISCOVERY_ROUND: usize = SYS_TIMER + 5; -const NODE_TABLE: usize = SYS_TIMER + 6; -const FIRST_SESSION: usize = 0; -const LAST_SESSION: usize = FIRST_SESSION + MAX_SESSIONS - 1; -const USER_TIMER: usize = LAST_SESSION + 256; -const SYS_TIMER: usize = LAST_SESSION + 1; - /// Protocol handler level packet id pub type PacketId = u8; /// Protocol / handler id -pub type ProtocolId = &'static str; +pub type ProtocolId = [u8; 3]; /// Messages used to communitate with the event loop from other threads. #[derive(Clone)] @@ -185,7 +190,7 @@ pub struct CapabilityInfo { impl Encodable for CapabilityInfo { fn rlp_append(&self, s: &mut RlpStream) { s.begin_list(2); - s.append(&self.protocol); + s.append(&&self.protocol[..]); s.append(&self.version); } } @@ -284,10 +289,13 @@ impl<'s> NetworkContext<'s> { } /// Returns max version for a given protocol. 
- pub fn protocol_version(&self, peer: PeerId, protocol: &str) -> Option { + pub fn protocol_version(&self, peer: PeerId, protocol: ProtocolId) -> Option { let session = self.resolve_session(peer); session.and_then(|s| s.lock().capability_version(protocol)) } + + /// Returns this object's subprotocol name. + pub fn subprotocol_name(&self) -> ProtocolId { self.protocol } } /// Shared host information @@ -561,11 +569,11 @@ impl Host { discovery.init_node_list(self.nodes.read().unordered_entries()); discovery.add_node_list(self.nodes.read().unordered_entries()); *self.discovery.lock() = Some(discovery); - io.register_stream(DISCOVERY).expect("Error registering UDP listener"); - io.register_timer(DISCOVERY_REFRESH, 7200).expect("Error registering discovery timer"); - io.register_timer(DISCOVERY_ROUND, 300).expect("Error registering discovery timer"); + try!(io.register_stream(DISCOVERY)); + try!(io.register_timer(DISCOVERY_REFRESH, DISCOVERY_REFRESH_TIMEOUT)); + try!(io.register_timer(DISCOVERY_ROUND, DISCOVERY_ROUND_TIMEOUT)); } - try!(io.register_timer(NODE_TABLE, 300_000)); + try!(io.register_timer(NODE_TABLE, NODE_TABLE_TIMEOUT)); try!(io.register_stream(TCP_ACCEPT)); Ok(()) } @@ -588,7 +596,8 @@ impl Host { } fn handshake_count(&self) -> usize { - self.sessions.read().count() - self.session_count() + // session_count < total_count is possible because of the data race. 
+ self.sessions.read().count().saturating_sub(self.session_count()) } fn keep_alive(&self, io: &IoContext) { @@ -801,8 +810,8 @@ impl Host { } } for (p, _) in self.handlers.read().iter() { - if s.have_capability(p) { - ready_data.push(p); + if s.have_capability(*p) { + ready_data.push(*p); } } }, @@ -811,7 +820,7 @@ impl Host { protocol, packet_id, }) => { - match self.handlers.read().get(protocol) { + match self.handlers.read().get(&protocol) { None => { warn!(target: "network", "No handler found for protocol: {:?}", protocol) }, Some(_) => packet_data.push((protocol, packet_id, data)), } @@ -826,13 +835,13 @@ impl Host { } let handlers = self.handlers.read(); for p in ready_data { - let h = handlers.get(p).unwrap().clone(); + let h = handlers.get(&p).unwrap().clone(); self.stats.inc_sessions(); let reserved = self.reserved_nodes.read(); h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token); } for (p, packet_id, data) in packet_data { - let h = handlers.get(p).unwrap().clone(); + let h = handlers.get(&p).unwrap().clone(); let reserved = self.reserved_nodes.read(); h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]); } @@ -857,8 +866,8 @@ impl Host { if s.is_ready() { self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst); for (p, _) in self.handlers.read().iter() { - if s.have_capability(p) { - to_disconnect.push(p); + if s.have_capability(*p) { + to_disconnect.push(*p); } } } @@ -874,7 +883,7 @@ impl Host { } } for p in to_disconnect { - let h = self.handlers.read().get(p).unwrap().clone(); + let h = self.handlers.read().get(&p).unwrap().clone(); let reserved = self.reserved_nodes.read(); h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone(), &reserved), &token); } @@ -978,9 +987,10 @@ impl IoHandler for Host { NODE_TABLE => { trace!(target: "network", "Refreshing node table"); self.nodes.write().clear_useless(); + 
self.nodes.write().save(); }, _ => match self.timers.read().get(&token).cloned() { - Some(timer) => match self.handlers.read().get(timer.protocol).cloned() { + Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() { None => { warn!(target: "network", "No handler found for protocol: {:?}", timer.protocol) }, Some(h) => { let reserved = self.reserved_nodes.read(); @@ -1004,11 +1014,11 @@ impl IoHandler for Host { } => { let h = handler.clone(); let reserved = self.reserved_nodes.read(); - h.initialize(&NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved)); - self.handlers.write().insert(protocol, h); + h.initialize(&NetworkContext::new(io, *protocol, None, self.sessions.clone(), &reserved)); + self.handlers.write().insert(*protocol, h); let mut info = self.info.write(); for v in versions { - info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 }); + info.capabilities.push(CapabilityInfo { protocol: *protocol, version: *v, packet_count:0 }); } }, NetworkIoMessage::AddTimer { @@ -1023,7 +1033,7 @@ impl IoHandler for Host { *counter += 1; handler_token }; - self.timers.write().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token }); + self.timers.write().insert(handler_token, ProtocolTimer { protocol: *protocol, token: *token }); io.register_timer(handler_token, *delay).unwrap_or_else(|e| debug!("Error registering timer {}: {:?}", token, e)); }, NetworkIoMessage::Disconnect(ref peer) => { diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index bfcd49cea..cd0336823 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -45,7 +45,7 @@ //! //! fn main () { //! let mut service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service"); -//! service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]); +//! service.register_protocol(Arc::new(MyHandler), *b"myp", &[1u8]); //! 
service.start().expect("Error starting service"); //! //! // Wait for quit condition diff --git a/util/network/src/node_table.rs b/util/network/src/node_table.rs index 073e9ab76..c90e35a27 100644 --- a/util/network/src/node_table.rs +++ b/util/network/src/node_table.rs @@ -266,7 +266,8 @@ impl NodeTable { self.useless_nodes.clear(); } - fn save(&self) { + /// Save the nodes.json file. + pub fn save(&self) { if let Some(ref path) = self.path { let mut path_buf = PathBuf::from(path); if let Err(e) = fs::create_dir_all(path_buf.as_path()) { @@ -292,7 +293,7 @@ impl NodeTable { } }; if let Err(e) = file.write(&json.into_bytes()) { - warn!("Error writing node table file: {:?}", e); + warn!("Error writing node table file: {:?}", e); } } } diff --git a/util/network/src/session.rs b/util/network/src/session.rs index 164248d62..fdba12fff 100644 --- a/util/network/src/session.rs +++ b/util/network/src/session.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::{str, io}; use std::net::SocketAddr; -use std::io; use std::sync::*; use mio::*; use mio::tcp::*; @@ -63,7 +63,7 @@ pub enum SessionData { /// Packet data data: Vec, /// Packet protocol ID - protocol: &'static str, + protocol: [u8; 3], /// Zero based packet ID packet_id: u8, }, @@ -89,15 +89,21 @@ pub struct SessionInfo { #[derive(Debug, PartialEq, Eq)] pub struct PeerCapabilityInfo { - pub protocol: String, + pub protocol: ProtocolId, pub version: u8, } impl Decodable for PeerCapabilityInfo { fn decode(decoder: &D) -> Result where D: Decoder { let c = decoder.as_rlp(); + let p: Vec = try!(c.val_at(0)); + if p.len() != 3 { + return Err(DecoderError::Custom("Invalid subprotocol string length. 
Should be 3")); + } + let mut p2: ProtocolId = [0u8; 3]; + p2.clone_from_slice(&p); Ok(PeerCapabilityInfo { - protocol: try!(c.val_at(0)), + protocol: p2, version: try!(c.val_at(1)) }) } @@ -105,7 +111,7 @@ impl Decodable for PeerCapabilityInfo { #[derive(Debug)] struct SessionCapabilityInfo { - pub protocol: &'static str, + pub protocol: [u8; 3], pub version: u8, pub packet_count: u8, pub id_offset: u8, @@ -239,12 +245,12 @@ impl Session { } /// Checks if peer supports given capability - pub fn have_capability(&self, protocol: &str) -> bool { + pub fn have_capability(&self, protocol: [u8; 3]) -> bool { self.info.capabilities.iter().any(|c| c.protocol == protocol) } /// Checks if peer supports given capability - pub fn capability_version(&self, protocol: &str) -> Option { + pub fn capability_version(&self, protocol: [u8; 3]) -> Option { self.info.capabilities.iter().filter_map(|c| if c.protocol == protocol { Some(c.version) } else { None }).max() } @@ -270,10 +276,10 @@ impl Session { } /// Send a protocol packet to peer. 
- pub fn send_packet(&mut self, io: &IoContext, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), NetworkError> + pub fn send_packet(&mut self, io: &IoContext, protocol: [u8; 3], packet_id: u8, data: &[u8]) -> Result<(), NetworkError> where Message: Send + Sync + Clone { if self.info.capabilities.is_empty() || !self.had_hello { - debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), protocol, packet_id); + debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), str::from_utf8(&protocol[..]).unwrap_or("??"), packet_id); return Err(From::from(NetworkError::BadProtocol)); } if self.expired() { diff --git a/util/network/src/tests.rs b/util/network/src/tests.rs index 4186e549a..97a641c81 100644 --- a/util/network/src/tests.rs +++ b/util/network/src/tests.rs @@ -41,7 +41,7 @@ impl TestProtocol { /// Creates and register protocol with the network service pub fn register(service: &mut NetworkService, drop_session: bool) -> Arc { let handler = Arc::new(TestProtocol::new(drop_session)); - service.register_protocol(handler.clone(), "test", &[42u8, 43u8]).expect("Error registering test protocol handler"); + service.register_protocol(handler.clone(), *b"tst", &[42u8, 43u8]).expect("Error registering test protocol handler"); handler } @@ -93,7 +93,7 @@ impl NetworkProtocolHandler for TestProtocol { fn net_service() { let service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service"); service.start().unwrap(); - service.register_protocol(Arc::new(TestProtocol::new(false)), "myproto", &[1u8]).unwrap(); + service.register_protocol(Arc::new(TestProtocol::new(false)), *b"myp", &[1u8]).unwrap(); } #[test] diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 708b8d870..df36918dd 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -143,12 +143,12 @@ impl CompactionProfile { } /// Database configuration -#[derive(Clone, Copy)] 
+#[derive(Clone)] pub struct DatabaseConfig { /// Max number of open files. pub max_open_files: i32, - /// Cache-size - pub cache_size: Option, + /// Cache sizes (in MiB) for specific columns. + pub cache_sizes: HashMap, usize>, /// Compaction profile pub compaction: CompactionProfile, /// Set number of columns @@ -159,17 +159,23 @@ pub struct DatabaseConfig { impl DatabaseConfig { /// Create new `DatabaseConfig` with default parameters and specified set of columns. + /// Note that cache sizes must be explicitly set. pub fn with_columns(columns: Option) -> Self { let mut config = Self::default(); config.columns = columns; config } + + /// Set the column cache size in MiB. + pub fn set_cache(&mut self, col: Option, size: usize) { + self.cache_sizes.insert(col, size); + } } impl Default for DatabaseConfig { fn default() -> DatabaseConfig { DatabaseConfig { - cache_size: None, + cache_sizes: HashMap::new(), max_open_files: 512, compaction: CompactionProfile::default(), columns: None, @@ -213,6 +219,9 @@ impl Database { /// Open database file. Creates if it does not exist. pub fn open(config: &DatabaseConfig, path: &str) -> Result { + // default cache size for columns not specified. + const DEFAULT_CACHE: usize = 2; + let mut opts = Options::new(); if let Some(rate_limit) = config.compaction.write_rate_limit { try!(opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))); @@ -232,17 +241,22 @@ impl Database { let mut cf_options = Vec::with_capacity(config.columns.unwrap_or(0) as usize); - for _ in 0 .. config.columns.unwrap_or(0) { + for col in 0 .. 
config.columns.unwrap_or(0) { let mut opts = Options::new(); opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction); opts.set_target_file_size_base(config.compaction.initial_file_size); opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier); - if let Some(cache_size) = config.cache_size { + + let col_opt = config.columns.map(|_| col); + + { + let cache_size = config.cache_sizes.get(&col_opt).cloned().unwrap_or(DEFAULT_CACHE); let mut block_opts = BlockBasedOptions::new(); - // all goes to read cache + // all goes to read cache. block_opts.set_cache(Cache::new(cache_size * 1024 * 1024)); opts.set_block_based_table_factory(&block_opts); } + cf_options.push(opts); } diff --git a/util/src/migration/mod.rs b/util/src/migration/mod.rs index cfd828086..80cfa29b6 100644 --- a/util/src/migration/mod.rs +++ b/util/src/migration/mod.rs @@ -20,7 +20,9 @@ mod tests; use std::collections::BTreeMap; use std::fs; +use std::fmt; use std::path::{Path, PathBuf}; +use std::sync::Arc; use ::kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction}; @@ -96,20 +98,39 @@ pub enum Error { Custom(String), } +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match *self { + Error::CannotAddMigration => write!(f, "Cannot add migration"), + Error::MigrationImpossible => write!(f, "Migration impossible"), + Error::Io(ref err) => write!(f, "{}", err), + Error::Custom(ref err) => write!(f, "{}", err), + } + } +} + impl From<::std::io::Error> for Error { fn from(e: ::std::io::Error) -> Self { Error::Io(e) } } +impl From for Error { + fn from(e: String) -> Self { + Error::Custom(e) + } +} + /// A generalized migration from the given db to a destination db. pub trait Migration: 'static { + /// Number of columns in the database before the migration. + fn pre_columns(&self) -> Option { self.columns() } /// Number of columns in database after the migration. 
fn columns(&self) -> Option; /// Version of the database after the migration. fn version(&self) -> u32; /// Migrate a source to a destination. - fn migrate(&mut self, source: &Database, config: &Config, destination: &mut Database, col: Option) -> Result<(), Error>; + fn migrate(&mut self, source: Arc, config: &Config, destination: &mut Database, col: Option) -> Result<(), Error>; } /// A simple migration over key-value pairs. @@ -128,7 +149,7 @@ impl Migration for T { fn version(&self) -> u32 { SimpleMigration::version(self) } - fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { let mut batch = Batch::new(config, col); for (key, value) in source.iter(col) { @@ -195,6 +216,7 @@ impl Manager { Some(last) => migration.version() > last.version(), None => true, }; + match is_new { true => Ok(self.migrations.push(Box::new(migration))), false => Err(Error::CannotAddMigration), @@ -205,12 +227,16 @@ impl Manager { /// and producing a path where the final migration lives. pub fn execute(&mut self, old_path: &Path, version: u32) -> Result { let config = self.config.clone(); - let columns = self.no_of_columns_at(version); let migrations = self.migrations_from(version); + trace!(target: "migration", "Total migrations to execute for version {}: {}", version, migrations.len()); if migrations.is_empty() { return Err(Error::MigrationImpossible) }; + + let columns = migrations.iter().nth(0).and_then(|m| m.pre_columns()); + + trace!(target: "migration", "Expecting database to contain {:?} columns", columns); let mut db_config = DatabaseConfig { max_open_files: 64, - cache_size: None, + cache_sizes: Default::default(), compaction: config.compaction_profile, columns: columns, wal: true, @@ -222,7 +248,7 @@ impl Manager { // start with the old db. 
let old_path_str = try!(old_path.to_str().ok_or(Error::MigrationImpossible)); - let mut cur_db = try!(Database::open(&db_config, old_path_str).map_err(Error::Custom)); + let mut cur_db = Arc::new(try!(Database::open(&db_config, old_path_str).map_err(Error::Custom))); for migration in migrations { // Change number of columns in new db @@ -237,16 +263,16 @@ impl Manager { // perform the migration from cur_db to new_db. match current_columns { // migrate only default column - None => try!(migration.migrate(&cur_db, &config, &mut new_db, None)), + None => try!(migration.migrate(cur_db.clone(), &config, &mut new_db, None)), Some(v) => { // Migrate all columns in previous DB for col in 0..v { - try!(migration.migrate(&cur_db, &config, &mut new_db, Some(col))) + try!(migration.migrate(cur_db.clone(), &config, &mut new_db, Some(col))) } } } // next iteration, we will migrate from this db into the other temp. - cur_db = new_db; + cur_db = Arc::new(new_db); temp_idx.swap(); // remove the other temporary migration database. @@ -267,14 +293,6 @@ impl Manager { fn migrations_from(&mut self, version: u32) -> Vec<&mut Box> { self.migrations.iter_mut().filter(|m| m.version() > version).collect() } - - fn no_of_columns_at(&self, version: u32) -> Option { - let migration = self.migrations.iter().find(|m| m.version() == version); - match migration { - Some(m) => m.columns(), - None => None - } - } } /// Prints a dot every `max` ticks diff --git a/util/src/migration/tests.rs b/util/src/migration/tests.rs index ee5ff574e..57a5a9e32 100644 --- a/util/src/migration/tests.rs +++ b/util/src/migration/tests.rs @@ -19,7 +19,7 @@ //! are performed in temp sub-directories. 
use common::*; -use migration::{Config, SimpleMigration, Manager}; +use migration::{Batch, Config, Error, SimpleMigration, Migration, Manager}; use kvdb::Database; use devtools::RandomTempPath; @@ -62,11 +62,10 @@ impl SimpleMigration for Migration0 { fn version(&self) -> u32 { 1 } - fn simple_migrate(&mut self, key: Vec, value: Vec) -> Option<(Vec, Vec)> { - let mut key = key; + fn simple_migrate(&mut self, mut key: Vec, mut value: Vec) -> Option<(Vec, Vec)> { key.push(0x11); - let mut value = value; value.push(0x22); + Some((key, value)) } } @@ -83,6 +82,31 @@ impl SimpleMigration for Migration1 { } } +struct AddsColumn; + +impl Migration for AddsColumn { + fn pre_columns(&self) -> Option { None } + + fn columns(&self) -> Option { Some(1) } + + fn version(&self) -> u32 { 1 } + + fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + let mut batch = Batch::new(config, col); + + for (key, value) in source.iter(col) { + try!(batch.insert(key.to_vec(), value.to_vec(), dest)); + } + + + if col == Some(1) { + try!(batch.insert(vec![1, 2, 3], vec![4, 5, 6], dest)); + } + + batch.commit(dest) + } +} + #[test] fn one_simple_migration() { let dir = RandomTempPath::create_dir(); @@ -189,3 +213,16 @@ fn is_migration_needed() { assert!(manager.is_needed(1)); assert!(!manager.is_needed(2)); } + +#[test] +fn pre_columns() { + let mut manager = Manager::new(Config::default()); + manager.add_migration(AddsColumn).unwrap(); + + let dir = RandomTempPath::create_dir(); + let db_path = db_path(dir.as_path()); + + // this shouldn't fail to open the database even though it's one column + // short of the one before it. 
+ manager.execute(&db_path, 0).unwrap(); +} diff --git a/util/src/misc.rs b/util/src/misc.rs index 50b2e7e8d..b0452e85e 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -23,7 +23,7 @@ use target_info::Target; include!(concat!(env!("OUT_DIR"), "/version.rs")); include!(concat!(env!("OUT_DIR"), "/rustc_version.rs")); -#[derive(PartialEq,Eq,Clone,Copy)] +#[derive(PartialEq, Eq, Clone, Copy, Debug)] /// Boolean type for clean/dirty status. pub enum Filth { /// Data has not been changed. diff --git a/util/src/standard.rs b/util/src/standard.rs index 3d6c93e1a..0693dcd23 100644 --- a/util/src/standard.rs +++ b/util/src/standard.rs @@ -46,4 +46,4 @@ pub use rustc_serialize::hex::{FromHex, FromHexError}; pub use heapsize::HeapSizeOf; pub use itertools::Itertools; -pub use parking_lot::{Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; \ No newline at end of file +pub use parking_lot::{Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index 6eebd8f5d..952eb8894 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -233,4 +233,7 @@ impl TrieFactory { TrieSpec::Fat => Ok(Box::new(try!(FatDBMut::from_existing(db, root)))), } } + + /// Returns true iff the trie DB is a fat DB (allows enumeration of keys). + pub fn is_fat(&self) -> bool { self.spec == TrieSpec::Fat } }