diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 65e60d6eb..58036eb88 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,7 +1,6 @@ stages: - build - test - - deploy variables: GIT_DEPTH: "3" SIMPLECOV: "true" @@ -9,6 +8,48 @@ variables: cache: key: "$CI_BUILD_NAME/$CI_BUILD_REF_NAME" untracked: true +linux-stable: + stage: build + image: ethcore/rust:stable + only: + - master + - beta + - tags + - stable + script: + - cargo build --release --verbose + - strip target/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity + tags: + - rust + - rust-stable + artifacts: + paths: + - target/release/parity + name: "stable-x86_64-unknown-linux-gnu_parity" +linux-stable-14.04: + stage: build + image: ethcore/rust-14.04:latest + only: + - master + - beta + - tags + - stable + script: + - cargo build --release --verbose + - strip target/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity + tags: + - rust + - rust-14.04 + artifacts: + paths: + - target/release/parity + name: "stable-x86_64-unknown-ubuntu_14_04-gnu_parity" linux-beta: stage: build image: ethcore/rust:beta @@ -18,23 +59,16 @@ linux-beta: - tags - stable script: - - export - cargo build --release --verbose - strip target/release/parity - - cp target/release/parity parity tags: - rust - rust-beta artifacts: paths: - target/release/parity - name: "${CI_BUILD_NAME}_parity" - stage: deploy - tags: - - rust - - rust-beta - script: - - ./deploy.sh + name: "beta-x86_64-unknown-linux-gnu_parity" + allow_failure: true linux-nightly: stage: build image: ethcore/rust:nightly @@ -52,7 +86,7 @@ linux-nightly: artifacts: 
paths: - target/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "nightly-x86_64-unknown-linux-gnu_parity" allow_failure: true linux-centos: stage: build @@ -67,23 +101,25 @@ linux-centos: - export CC="gcc" - cargo build --release --verbose - strip target/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity tags: - rust - rust-centos artifacts: paths: - target/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "x86_64-unknown-centos-gnu_parity" linux-armv7: stage: build - image: ethcore/rust-arm:latest + image: ethcore/rust-armv7:latest only: - master - beta - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config @@ -91,13 +127,17 @@ linux-armv7: - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity tags: - rust - rust-arm artifacts: paths: - target/armv7-unknown-linux-gnueabihf/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "armv7_unknown_linux_gnueabihf_parity" + allow_failure: true linux-arm: stage: build image: ethcore/rust-arm:latest @@ -107,7 +147,6 @@ linux-arm: - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config @@ -115,24 +154,26 @@ linux-arm: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip 
target/arm-unknown-linux-gnueabihf/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity tags: - rust - rust-arm artifacts: paths: - target/arm-unknown-linux-gnueabihf/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "arm-unknown-linux-gnueabihf_parity" allow_failure: true linux-armv6: stage: build - image: ethcore/rust-arm:latest + image: ethcore/rust-armv6:latest only: - master - beta - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config @@ -140,24 +181,26 @@ linux-armv6: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release --verbose - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity tags: - rust - rust-arm artifacts: paths: - target/arm-unknown-linux-gnueabi/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "arm-unknown-linux-gnueabi_parity" allow_failure: true linux-aarch64: stage: build - image: ethcore/rust-arm:latest + image: ethcore/rust-aarch64:latest only: - master - beta - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config @@ -165,13 +208,16 @@ linux-aarch64: - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release --verbose - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket 
builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity tags: - rust - rust-arm artifacts: paths: - target/aarch64-unknown-linux-gnu/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "aarch64-unknown-linux-gnu_parity" allow_failure: true darwin: stage: build @@ -182,12 +228,15 @@ darwin: - stable script: - cargo build --release --verbose + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity tags: - osx artifacts: paths: - target/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "x86_64-apple-darwin_parity" windows: stage: build only: @@ -201,37 +250,24 @@ windows: - set RUST_BACKTRACE=1 - rustup default stable-x86_64-pc-windows-msvc - cargo build --release --verbose + - aws configure set aws_access_key_id %s3_key% + - aws configure set aws_secret_access_key %s3_secret% + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.pdb --body target/release/parity.pdb tags: - rust-windows artifacts: paths: - target/release/parity.exe - target/release/parity.pdb - name: "${CI_BUILD_NAME}_parity" -linux-stable: - stage: build - image: ethcore/rust:stable - only: - - master - - beta - - tags - - stable - script: - - export - - cargo build --release --verbose - - strip target/release/parity - tags: - - rust - - rust-stable - artifacts: - paths: - - target/release/parity - name: "${CI_BUILD_NAME}_parity" + name: "x86_64-pc-windows-msvc_parity" test-linux: stage: test before_script: - git submodule update --init --recursive script: - ./test.sh --verbose + tags: + - rust-test dependencies: - linux-stable diff --git a/Cargo.lock b/Cargo.lock 
index d9e5ced66..52bdee494 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,7 +3,7 @@ name = "parity" version = "1.4.0" dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", @@ -37,7 +37,10 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -84,16 +87,6 @@ name = "base64" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "bigint" -version = "0.1.0" -dependencies = [ - "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "bit-set" version = "0.4.0" @@ -147,15 +140,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "clippy" -version = "0.0.85" +version = "0.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - 
"clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "clippy_lints" -version = "0.0.85" +version = "0.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -277,7 +270,7 @@ version = "1.4.0" dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", @@ -290,13 +283,14 @@ dependencies = [ "ethjson 0.1.0", "ethkey 0.2.0", "ethstore 0.1.0", + "evmjit 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -304,18 +298,28 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethcore-bigint" +version = "0.1.0" +dependencies = [ + "heapsize 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ethcore-dapps" version = "1.4.0" dependencies = [ - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", "https-fetch 0.1.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -390,6 +394,7 @@ name = "ethcore-ipc-nano" version = "1.4.0" dependencies = [ "ethcore-ipc 1.4.0", + "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)", ] @@ -449,19 +454,20 @@ dependencies = [ name = "ethcore-rpc" version = "1.4.0" dependencies = [ - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", "ethcore 1.4.0", "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", "ethcore-ipc 1.4.0", "ethcore-util 1.4.0", + "ethcrypto 0.1.0", "ethjson 0.1.0", "ethkey 0.2.0", "ethstore 0.1.0", "ethsync 1.4.0", "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)", - "jsonrpc-core 
3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -477,13 +483,13 @@ dependencies = [ name = "ethcore-signer" version = "1.4.0" dependencies = [ - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", - "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-signer 1.4.0 (git+https://github.com/ethcore/parity-ui.git)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -502,7 +508,7 @@ dependencies = [ "ethcore-ipc-nano 1.4.0", "ethcore-util 1.4.0", "json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)", - "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", @@ -515,11 +521,11 @@ version = "1.4.0" dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "bigint 0.1.0", - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 
(registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", + "ethcore-bigint 0.1.0", "ethcore-devtools 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -546,8 +552,8 @@ dependencies = [ name = "ethcrypto" version = "0.1.0" dependencies = [ - "bigint 0.1.0", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", + "ethcore-bigint 0.1.0", "ethkey 0.2.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -568,9 +574,9 @@ dependencies = [ name = "ethkey" version = "0.2.0" dependencies = [ - "bigint 0.1.0", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", + "ethcore-bigint 0.1.0", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -601,7 +607,7 @@ dependencies = [ name = "ethsync" version = "1.4.0" dependencies = [ - "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.4.0", "ethcore-io 1.4.0", @@ -619,6 +625,13 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "evmjit" +version = "1.4.0" +dependencies = [ + "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name 
= "fdlimit" version = "0.1.0" @@ -770,7 +783,7 @@ source = "git+https://github.com/ethcore/json-ipc-server.git#5fbd0253750d3097b9a dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -785,7 +798,7 @@ source = "git+https://github.com/ethcore/json-tcp-server#c2858522274ae56042472bb dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -794,7 +807,7 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "3.0.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -807,10 +820,10 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "6.1.0" -source = "git+https://github.com/ethcore/jsonrpc-http-server.git#339f7209b01d26aea01722b3a69127235287d6a9" +source = "git+https://github.com/ethcore/jsonrpc-http-server.git#2766c6708f66f6f4e667211461d220b49c0d9fdf" dependencies = [ "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", - "jsonrpc-core 3.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1305,7 +1318,7 @@ dependencies = [ [[package]] name = "rayon" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1343,8 +1356,8 @@ dependencies = [ name = "rlp" version = "0.1.0" dependencies = [ - "bigint 0.1.0", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bigint 0.1.0", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1675,6 +1688,14 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "toml" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "traitobject" version = "0.0.1" @@ -1853,8 +1874,8 @@ dependencies = [ "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" -"checksum clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "97f6d6efa6d7aec74d4eca1be62164b605d43b7fcb5256e9db0449f685130cba" -"checksum clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = 
"dc96d3c877b63943b08ce3037c0ae8fd3bd5dead5fab11178b93afc71ca16031" +"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b" +"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96" "checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245" "checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc" "checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "" @@ -1940,7 +1961,7 @@ dependencies = [ "checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c" "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a" "checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5" -"checksum rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "941deb43a6254b9867fec1e0caeda38a2ad905ab18c57f7c68c396ca68998c07" +"checksum rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "655df67c314c30fa3055a365eae276eb88aa4f3413a352a1ab32c1320eda41ea" "checksum regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)" = "b4329b8928a284580a1c63ec9d846b12f6d3472317243ff7077aff11f23f2b29" "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9" "checksum ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d059a6a96d3be79042e3f70eb97945912839265f9d8ab45b921abaf266c70dbb" @@ -1982,6 +2003,7 @@ dependencies = [ "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" +"checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72" "checksum traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07eaeb7689bb7fca7ce15628319635758eda769fed481ecfe6686ddef2600616" "checksum transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15f7cc7116182edca1ed08f6f8c4da92104555ca77addbabea4eaa59b20373d0" "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" diff --git a/Cargo.toml b/Cargo.toml index 112a36312..84edb6c1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ ansi_term = "0.7" lazy_static = "0.2" regex = "0.1" isatty = "0.1" +toml = "0.2" ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } ethcore = { path = "ethcore" } @@ -41,8 +42,10 @@ ethcore-logger = { path = "logger" } rlp = { path = "util/rlp" } json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } ethcore-dapps = { path = "dapps", optional = true } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} ethcore-stratum = { path = "stratum" } +serde = "0.8.0" +serde_json = "0.8.0" [target.'cfg(windows)'.dependencies] winapi = "0.2" @@ -60,11 +63,13 @@ ui = ["dapps", "ethcore-signer/ui"] 
use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"] dapps = ["ethcore-dapps"] ipc = ["ethcore/ipc"] +jit = ["ethcore/jit"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] stratum = ["ipc"] ethkey-cli = ["ethcore/ethkey-cli"] ethstore-cli = ["ethcore/ethstore-cli"] +evm-debug = ["ethcore/evm-debug"] [[bin]] path = "parity/main.rs" diff --git a/README.md b/README.md index 26913183c..d5fb5f044 100644 --- a/README.md +++ b/README.md @@ -14,8 +14,8 @@ Be sure to check out [our wiki][wiki-url] for more information. [gitter-image]: https://badges.gitter.im/Join%20Chat.svg [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge [license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg -[license-url]: http://www.gnu.org/licenses/gpl-3.0.en.html -[doc-url]: http://ethcore.github.io/parity/ethcore/index.html +[license-url]: https://www.gnu.org/licenses/gpl-3.0.en.html +[doc-url]: https://ethcore.github.io/parity/ethcore/index.html [wiki-url]: https://github.com/ethcore/parity/wiki ---- diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 531f7da1b..b1883e748 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -33,13 +33,13 @@ parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", versio parity-dapps-home = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } parity-dapps-wallet = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true } mime_guess = { version = "1.6.1" } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} [build-dependencies] serde_codegen = { version = "0.8", optional = true } [features] -default = ["serde_codegen", "extra-dapps", "https-fetch/ca-github-only"] +default = ["serde_codegen", "extra-dapps"] 
extra-dapps = ["parity-dapps-wallet"] nightly = ["serde_macros"] dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"] diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 84db93f63..9a8dfef95 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -15,23 +15,26 @@ // along with Parity. If not, see . use std::sync::Arc; -use hyper::{server, net, Decoder, Encoder, Next}; +use hyper::{server, net, Decoder, Encoder, Next, Control}; use api::types::{App, ApiError}; use api::response::{as_json, as_json_error, ping_response}; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; +use apps::fetcher::ContentFetcher; #[derive(Clone)] pub struct RestApi { local_domain: String, endpoints: Arc, + fetcher: Arc, } impl RestApi { - pub fn new(local_domain: String, endpoints: Arc) -> Box { + pub fn new(local_domain: String, endpoints: Arc, fetcher: Arc) -> Box { Box::new(RestApi { local_domain: local_domain, endpoints: endpoints, + fetcher: fetcher, }) } @@ -43,23 +46,42 @@ impl RestApi { } impl Endpoint for RestApi { - fn to_handler(&self, _path: EndpointPath) -> Box { - Box::new(RestApiRouter { - api: self.clone(), - handler: as_json_error(&ApiError { - code: "404".into(), - title: "Not Found".into(), - detail: "Resource you requested has not been found.".into(), - }), - }) + fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box { + Box::new(RestApiRouter::new(self.clone(), path, control)) } } struct RestApiRouter { api: RestApi, + path: Option, + control: Option, handler: Box, } +impl RestApiRouter { + fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { + RestApiRouter { + path: Some(path), + control: Some(control), + api: api, + handler: as_json_error(&ApiError { + code: "404".into(), + title: "Not Found".into(), + detail: "Resource you requested has not been found.".into(), + }), + } + } + + fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option> { + 
match hash { + Some(hash) if self.api.fetcher.contains(hash) => { + Some(self.api.fetcher.to_async_handler(path, control)) + }, + _ => None + } + } +} + impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { @@ -69,13 +91,18 @@ impl server::Handler for RestApiRouter { return Next::write(); } - let url = url.expect("Check for None is above; qed"); + let url = url.expect("Check for None early-exists above; qed"); + let path = self.path.take().expect("on_request called only once, and path is always defined in new; qed"); + let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed"); + let endpoint = url.path.get(1).map(|v| v.as_str()); + let hash = url.path.get(2).map(|v| v.as_str()); let handler = endpoint.and_then(|v| match v { "apps" => Some(as_json(&self.api.list_apps())), "ping" => Some(ping_response(&self.api.local_domain)), - _ => None, + "content" => self.resolve_content(hash, path, control), + _ => None }); // Overwrite default diff --git a/dapps/src/apps/cache.rs b/dapps/src/apps/cache.rs index b5acbcb15..be9521cf9 100644 --- a/dapps/src/apps/cache.rs +++ b/dapps/src/apps/cache.rs @@ -18,13 +18,13 @@ use std::fs; use std::sync::{Arc}; -use std::sync::atomic::{AtomicBool, Ordering}; use linked_hash_map::LinkedHashMap; use page::LocalPageEndpoint; +use handlers::FetchControl; pub enum ContentStatus { - Fetching(Arc), + Fetching(Arc), Ready(LocalPageEndpoint), } @@ -57,10 +57,10 @@ impl ContentCache { while len > expected_size { let entry = self.cache.pop_front().unwrap(); match entry.1 { - ContentStatus::Fetching(ref abort) => { + ContentStatus::Fetching(ref fetch) => { trace!(target: "dapps", "Aborting {} because of limit.", entry.0); // Mark as aborted - abort.store(true, Ordering::SeqCst); + fetch.abort() }, ContentStatus::Ready(ref endpoint) => { trace!(target: "dapps", "Removing {} because of limit.", entry.0); diff --git a/dapps/src/apps/fetcher.rs 
b/dapps/src/apps/fetcher.rs index 502fbe4aa..8702e4706 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -23,7 +23,6 @@ use std::{fs, env, fmt}; use std::io::{self, Read, Write}; use std::path::PathBuf; use std::sync::Arc; -use std::sync::atomic::{AtomicBool}; use rustc_serialize::hex::FromHex; use hyper; @@ -38,65 +37,67 @@ use handlers::{ContentHandler, ContentFetcherHandler, ContentValidator}; use endpoint::{Endpoint, EndpointPath, Handler}; use apps::cache::{ContentCache, ContentStatus}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest}; -use apps::urlhint::{URLHintContract, URLHint}; +use apps::urlhint::{URLHintContract, URLHint, URLHintResult}; const MAX_CACHED_DAPPS: usize = 10; -pub struct AppFetcher { +pub struct ContentFetcher { dapps_path: PathBuf, resolver: R, + cache: Arc>, sync: Arc, - dapps: Arc>, } -impl Drop for AppFetcher { +impl Drop for ContentFetcher { fn drop(&mut self) { // Clear cache path let _ = fs::remove_dir_all(&self.dapps_path); } } -impl AppFetcher { +impl ContentFetcher { pub fn new(resolver: R, sync_status: Arc) -> Self { let mut dapps_path = env::temp_dir(); dapps_path.push(random_filename()); - AppFetcher { + ContentFetcher { dapps_path: dapps_path, resolver: resolver, sync: sync_status, - dapps: Arc::new(Mutex::new(ContentCache::default())), + cache: Arc::new(Mutex::new(ContentCache::default())), } } #[cfg(test)] - fn set_status(&self, app_id: &str, status: ContentStatus) { - self.dapps.lock().insert(app_id.to_owned(), status); + fn set_status(&self, content_id: &str, status: ContentStatus) { + self.cache.lock().insert(content_id.to_owned(), status); } - pub fn contains(&self, app_id: &str) -> bool { - let mut dapps = self.dapps.lock(); - // Check if we already have the app - if dapps.get(app_id).is_some() { - return true; + pub fn contains(&self, content_id: &str) -> bool { + { + let mut cache = self.cache.lock(); + // Check if we already have the app + if 
cache.get(content_id).is_some() { + return true; + } } // fallback to resolver - if let Ok(app_id) = app_id.from_hex() { + if let Ok(content_id) = content_id.from_hex() { // if app_id is valid, but we are syncing always return true. if self.sync.is_major_syncing() { return true; } // else try to resolve the app_id - self.resolver.resolve(app_id).is_some() + self.resolver.resolve(content_id).is_some() } else { false } } pub fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box { - let mut dapps = self.dapps.lock(); - let app_id = path.app_id.clone(); + let mut cache = self.cache.lock(); + let content_id = path.app_id.clone(); if self.sync.is_major_syncing() { return Box::new(ContentHandler::error( @@ -108,57 +109,85 @@ impl AppFetcher { } let (new_status, handler) = { - let status = dapps.get(&app_id); + let status = cache.get(&content_id); match status { // Just server dapp Some(&mut ContentStatus::Ready(ref endpoint)) => { (None, endpoint.to_async_handler(path, control)) }, // App is already being fetched - Some(&mut ContentStatus::Fetching(_)) => { - (None, Box::new(ContentHandler::error_with_refresh( - StatusCode::ServiceUnavailable, - "Download In Progress", - "This dapp is already being downloaded. Please wait...", - None, - )) as Box) + Some(&mut ContentStatus::Fetching(ref fetch_control)) => { + trace!(target: "dapps", "Content fetching in progress. Waiting..."); + (None, fetch_control.to_handler(control)) }, // We need to start fetching app None => { - let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true."); - let app = self.resolver.resolve(app_hex); + trace!(target: "dapps", "Content unavailable. 
Fetching..."); + let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true."); + let content = self.resolver.resolve(content_hex); - if let Some(app) = app { - let abort = Arc::new(AtomicBool::new(false)); + let cache = self.cache.clone(); + let on_done = move |id: String, result: Option| { + let mut cache = cache.lock(); + match result { + Some(endpoint) => { + cache.insert(id, ContentStatus::Ready(endpoint)); + }, + // In case of error + None => { + cache.remove(&id); + }, + } + }; - (Some(ContentStatus::Fetching(abort.clone())), Box::new(ContentFetcherHandler::new( - app, - abort, - control, - path.using_dapps_domains, - DappInstaller { - dapp_id: app_id.clone(), - dapps_path: self.dapps_path.clone(), - dapps: self.dapps.clone(), - } - )) as Box) - } else { - // This may happen when sync status changes in between - // `contains` and `to_handler` - (None, Box::new(ContentHandler::error( - StatusCode::NotFound, - "Resource Not Found", - "Requested resource was not found.", - None - )) as Box) + match content { + Some(URLHintResult::Dapp(dapp)) => { + let (handler, fetch_control) = ContentFetcherHandler::new( + dapp.url(), + control, + path.using_dapps_domains, + DappInstaller { + id: content_id.clone(), + dapps_path: self.dapps_path.clone(), + on_done: Box::new(on_done), + } + ); + + (Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box) + }, + Some(URLHintResult::Content(content)) => { + let (handler, fetch_control) = ContentFetcherHandler::new( + content.url, + control, + path.using_dapps_domains, + ContentInstaller { + id: content_id.clone(), + mime: content.mime, + content_path: self.dapps_path.clone(), + on_done: Box::new(on_done), + } + ); + + (Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box) + }, + None => { + // This may happen when sync status changes in between + // `contains` and `to_handler` + (None, Box::new(ContentHandler::error( + StatusCode::NotFound, + "Resource 
Not Found", + "Requested resource was not found.", + None + )) as Box) + }, } }, } }; if let Some(status) = new_status { - dapps.clear_garbage(MAX_CACHED_DAPPS); - dapps.insert(app_id, status); + cache.clear_garbage(MAX_CACHED_DAPPS); + cache.insert(content_id, status); } handler @@ -169,7 +198,7 @@ impl AppFetcher { pub enum ValidationError { Io(io::Error), Zip(zip::result::ZipError), - InvalidDappId, + InvalidContentId, ManifestNotFound, ManifestSerialization(String), HashMismatch { expected: H256, got: H256, }, @@ -180,7 +209,7 @@ impl fmt::Display for ValidationError { match *self { ValidationError::Io(ref io) => write!(f, "Unexpected IO error occured: {:?}", io), ValidationError::Zip(ref zip) => write!(f, "Unable to read ZIP archive: {:?}", zip), - ValidationError::InvalidDappId => write!(f, "Dapp ID is invalid. It should be 32 bytes hash of content."), + ValidationError::InvalidContentId => write!(f, "ID is invalid. It should be 256 bits keccak hash of content."), ValidationError::ManifestNotFound => write!(f, "Downloaded Dapp bundle did not contain valid manifest.json file."), ValidationError::ManifestSerialization(ref err) => { write!(f, "There was an error during Dapp Manifest serialization: {:?}", err) @@ -204,10 +233,44 @@ impl From for ValidationError { } } +struct ContentInstaller { + id: String, + mime: String, + content_path: PathBuf, + on_done: Box) + Send>, +} + +impl ContentValidator for ContentInstaller { + type Error = ValidationError; + + fn validate_and_install(&self, path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> { + // Create dir + try!(fs::create_dir_all(&self.content_path)); + + // And prepare path for a file + let filename = path.file_name().expect("We always fetch a file."); + let mut content_path = self.content_path.clone(); + content_path.push(&filename); + + if content_path.exists() { + try!(fs::remove_dir_all(&content_path)) + } + + try!(fs::copy(&path, &content_path)); + + Ok((self.id.clone(), 
LocalPageEndpoint::single_file(content_path, self.mime.clone()))) + } + + fn done(&self, endpoint: Option) { + (self.on_done)(self.id.clone(), endpoint) + } +} + + struct DappInstaller { - dapp_id: String, + id: String, dapps_path: PathBuf, - dapps: Arc>, + on_done: Box) + Send>, } impl DappInstaller { @@ -245,14 +308,14 @@ impl DappInstaller { impl ContentValidator for DappInstaller { type Error = ValidationError; - fn validate_and_install(&self, app_path: PathBuf) -> Result { + fn validate_and_install(&self, app_path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> { trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path); let mut file_reader = io::BufReader::new(try!(fs::File::open(app_path))); let hash = try!(sha3(&mut file_reader)); - let dapp_id = try!(self.dapp_id.as_str().parse().map_err(|_| ValidationError::InvalidDappId)); - if dapp_id != hash { + let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId)); + if id != hash { return Err(ValidationError::HashMismatch { - expected: dapp_id, + expected: id, got: hash, }); } @@ -262,7 +325,7 @@ impl ContentValidator for DappInstaller { // First find manifest file let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip)); // Overwrite id to match hash - manifest.id = self.dapp_id.clone(); + manifest.id = self.id.clone(); let target = self.dapp_target_path(&manifest); @@ -299,23 +362,15 @@ impl ContentValidator for DappInstaller { let mut manifest_file = try!(fs::File::create(manifest_path)); try!(manifest_file.write_all(manifest_str.as_bytes())); + // Create endpoint + let app = LocalPageEndpoint::new(target, manifest.clone().into()); + // Return modified app manifest - Ok(manifest) + Ok((manifest.id.clone(), app)) } - fn done(&self, manifest: Option<&Manifest>) { - let mut dapps = self.dapps.lock(); - match manifest { - Some(manifest) => { - let path = self.dapp_target_path(manifest); - let app = LocalPageEndpoint::new(path, 
manifest.clone().into()); - dapps.insert(self.dapp_id.clone(), ContentStatus::Ready(app)); - }, - // In case of error - None => { - dapps.remove(&self.dapp_id); - }, - } + fn done(&self, endpoint: Option) { + (self.on_done)(self.id.clone(), endpoint) } } @@ -327,12 +382,12 @@ mod tests { use endpoint::EndpointInfo; use page::LocalPageEndpoint; use apps::cache::ContentStatus; - use apps::urlhint::{GithubApp, URLHint}; - use super::AppFetcher; + use apps::urlhint::{URLHint, URLHintResult}; + use super::ContentFetcher; struct FakeResolver; impl URLHint for FakeResolver { - fn resolve(&self, _app_id: Bytes) -> Option { + fn resolve(&self, _id: Bytes) -> Option { None } } @@ -341,7 +396,7 @@ mod tests { fn should_true_if_contains_the_app() { // given let path = env::temp_dir(); - let fetcher = AppFetcher::new(FakeResolver, Arc::new(|| false)); + let fetcher = ContentFetcher::new(FakeResolver, Arc::new(|| false)); let handler = LocalPageEndpoint::new(path, EndpointInfo { name: "fake".into(), description: "".into(), diff --git a/dapps/src/apps/urlhint.rs b/dapps/src/apps/urlhint.rs index f57e5e0d7..2b86c0777 100644 --- a/dapps/src/apps/urlhint.rs +++ b/dapps/src/apps/urlhint.rs @@ -17,6 +17,7 @@ use std::fmt; use std::sync::Arc; use rustc_serialize::hex::ToHex; +use mime_guess; use ethabi::{Interface, Contract, Token}; use util::{Address, Bytes, Hashable}; @@ -52,6 +53,13 @@ impl GithubApp { } } +#[derive(Debug, PartialEq)] +pub struct Content { + pub url: String, + pub mime: String, + pub owner: Address, +} + /// RAW Contract interface. /// Should execute transaction using current blockchain state. 
pub trait ContractClient: Send + Sync { @@ -61,10 +69,19 @@ pub trait ContractClient: Send + Sync { fn call(&self, address: Address, data: Bytes) -> Result; } +/// Result of resolving id to URL +#[derive(Debug, PartialEq)] +pub enum URLHintResult { + /// Dapp + Dapp(GithubApp), + /// Content + Content(Content), +} + /// URLHint Contract interface pub trait URLHint { /// Resolves given id to registrar entry. - fn resolve(&self, app_id: Bytes) -> Option; + fn resolve(&self, id: Bytes) -> Option; } pub struct URLHintContract { @@ -110,10 +127,10 @@ impl URLHintContract { } } - fn encode_urlhint_call(&self, app_id: Bytes) -> Option { + fn encode_urlhint_call(&self, id: Bytes) -> Option { let call = self.urlhint .function("entries".into()) - .and_then(|f| f.encode_call(vec![Token::FixedBytes(app_id)])); + .and_then(|f| f.encode_call(vec![Token::FixedBytes(id)])); match call { Ok(res) => { @@ -126,7 +143,7 @@ impl URLHintContract { } } - fn decode_urlhint_output(&self, output: Bytes) -> Option { + fn decode_urlhint_output(&self, output: Bytes) -> Option { trace!(target: "dapps", "Output: {:?}", output.to_hex()); let output = self.urlhint .function("entries".into()) @@ -149,6 +166,17 @@ impl URLHintContract { if owner == Address::default() { return None; } + + let commit = GithubApp::commit(&commit); + if commit == Some(Default::default()) { + let mime = guess_mime_type(&account_slash_repo).unwrap_or("application/octet-stream".into()); + return Some(URLHintResult::Content(Content { + url: account_slash_repo, + mime: mime, + owner: owner, + })); + } + let (account, repo) = { let mut it = account_slash_repo.split('/'); match (it.next(), it.next()) { @@ -157,12 +185,12 @@ impl URLHintContract { } }; - GithubApp::commit(&commit).map(|commit| GithubApp { + commit.map(|commit| URLHintResult::Dapp(GithubApp { account: account, repo: repo, commit: commit, owner: owner, - }) + })) }, e => { warn!(target: "dapps", "Invalid contract output parameters: {:?}", e); @@ -177,10 +205,10 
@@ impl URLHintContract { } impl URLHint for URLHintContract { - fn resolve(&self, app_id: Bytes) -> Option { + fn resolve(&self, id: Bytes) -> Option { self.urlhint_address().and_then(|address| { // Prepare contract call - self.encode_urlhint_call(app_id) + self.encode_urlhint_call(id) .and_then(|data| { let call = self.client.call(address, data); if let Err(ref e) = call { @@ -193,6 +221,34 @@ impl URLHint for URLHintContract { } } +fn guess_mime_type(url: &str) -> Option { + const CONTENT_TYPE: &'static str = "content-type="; + + let mut it = url.split('#'); + // skip url + let url = it.next(); + // get meta headers + let metas = it.next(); + if let Some(metas) = metas { + for meta in metas.split('&') { + let meta = meta.to_lowercase(); + if meta.starts_with(CONTENT_TYPE) { + return Some(meta[CONTENT_TYPE.len()..].to_owned()); + } + } + } + url.and_then(|url| { + url.split('.').last() + }).and_then(|extension| { + mime_guess::get_mime_type_str(extension).map(Into::into) + }) +} + +#[cfg(test)] +pub fn test_guess_mime_type(url: &str) -> Option { + guess_mime_type(url) +} + fn as_string(e: T) -> String { format!("{:?}", e) } @@ -201,7 +257,7 @@ fn as_string(e: T) -> String { mod tests { use std::sync::Arc; use std::str::FromStr; - use rustc_serialize::hex::{ToHex, FromHex}; + use rustc_serialize::hex::FromHex; use super::*; use util::{Bytes, Address, Mutex, ToPretty}; @@ -279,12 +335,33 @@ mod tests { let res = urlhint.resolve("test".bytes().collect()); // then - assert_eq!(res, Some(GithubApp { + assert_eq!(res, Some(URLHintResult::Dapp(GithubApp { account: "ethcore".into(), repo: "dao.claim".into(), commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(), owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), - })) + }))) + } + + #[test] + fn should_decode_urlhint_content_output() { + // given + let mut registrar = FakeRegistrar::new(); + registrar.responses = Mutex::new(vec![ + 
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), + Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003d68747470733a2f2f657468636f72652e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e67000000".from_hex().unwrap()), + ]); + let urlhint = URLHintContract::new(Arc::new(registrar)); + + // when + let res = urlhint.resolve("test".bytes().collect()); + + // then + assert_eq!(res, Some(URLHintResult::Content(Content { + url: "https://ethcore.io/assets/images/ethcore-black-horizontal.png".into(), + mime: "image/png".into(), + owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), + }))) } #[test] @@ -303,4 +380,20 @@ mod tests { // then assert_eq!(url, "https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213".to_owned()); } + + #[test] + fn should_guess_mime_type_from_url() { + let url1 = "https://ethcore.io/parity"; + let url2 = "https://ethcore.io/parity#content-type=image/png"; + let url3 = "https://ethcore.io/parity#something&content-type=image/png"; + let url4 = "https://ethcore.io/parity.png#content-type=image/jpeg"; + let url5 = "https://ethcore.io/parity.png"; + + + assert_eq!(test_guess_mime_type(url1), None); + assert_eq!(test_guess_mime_type(url2), Some("image/png".into())); + assert_eq!(test_guess_mime_type(url3), Some("image/png".into())); + assert_eq!(test_guess_mime_type(url4), Some("image/jpeg".into())); + assert_eq!(test_guess_mime_type(url5), Some("image/png".into())); + } } diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index 51e863f19..eea7a872f 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -42,7 +42,9 @@ pub type Handler = server::Handler + Send; pub trait Endpoint : Send + Sync { fn info(&self) -> 
Option<&EndpointInfo> { None } - fn to_handler(&self, path: EndpointPath) -> Box; + fn to_handler(&self, _path: EndpointPath) -> Box { + panic!("This Endpoint is asynchronous and requires Control object."); + } fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box { self.to_handler(path) diff --git a/dapps/src/error_tpl.html b/dapps/src/error_tpl.html index 6551431a6..c6b4db0e7 100644 --- a/dapps/src/error_tpl.html +++ b/dapps/src/error_tpl.html @@ -3,7 +3,6 @@ - {meta} {title} diff --git a/dapps/src/handlers/client/mod.rs b/dapps/src/handlers/client/mod.rs index 181f60001..3d8551e8a 100644 --- a/dapps/src/handlers/client/mod.rs +++ b/dapps/src/handlers/client/mod.rs @@ -63,7 +63,7 @@ impl Client { self.https_client.close(); } - pub fn request(&mut self, url: String, abort: Arc, on_done: Box) -> Result, FetchError> { + pub fn request(&mut self, url: &str, abort: Arc, on_done: Box) -> Result, FetchError> { let is_https = url.starts_with("https://"); let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl)); trace!(target: "dapps", "Fetching from: {:?}", url); diff --git a/dapps/src/handlers/content.rs b/dapps/src/handlers/content.rs index f283fbb6a..4dc011475 100644 --- a/dapps/src/handlers/content.rs +++ b/dapps/src/handlers/content.rs @@ -23,6 +23,7 @@ use hyper::status::StatusCode; use util::version; +#[derive(Clone)] pub struct ContentHandler { code: StatusCode, content: String, @@ -57,18 +58,6 @@ impl ContentHandler { Self::html(code, format!( include_str!("../error_tpl.html"), title=title, - meta="", - message=message, - details=details.unwrap_or_else(|| ""), - version=version(), - )) - } - - pub fn error_with_refresh(code: StatusCode, title: &str, message: &str, details: Option<&str>) -> Self { - Self::html(code, format!( - include_str!("../error_tpl.html"), - title=title, - meta="", message=message, details=details.unwrap_or_else(|| ""), version=version(), diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs 
index 98242f2b3..c463d3710 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -16,78 +16,160 @@ //! Hyper Server Handler that fetches a file during a request (proxy). -use std::fmt; +use std::{fs, fmt}; use std::path::PathBuf; use std::sync::{mpsc, Arc}; -use std::sync::atomic::AtomicBool; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Instant, Duration}; +use util::Mutex; -use hyper::{header, server, Decoder, Encoder, Next, Method, Control}; +use hyper::{server, Decoder, Encoder, Next, Method, Control}; use hyper::net::HttpStream; use hyper::status::StatusCode; -use handlers::ContentHandler; +use handlers::{ContentHandler, Redirection}; use handlers::client::{Client, FetchResult}; use apps::redirection_address; -use apps::urlhint::GithubApp; -use apps::manifest::Manifest; +use page::LocalPageEndpoint; const FETCH_TIMEOUT: u64 = 30; enum FetchState { - NotStarted(GithubApp), + NotStarted(String), Error(ContentHandler), - InProgress { - deadline: Instant, - receiver: mpsc::Receiver, - }, - Done(Manifest), + InProgress(mpsc::Receiver), + Done(String, LocalPageEndpoint, Redirection), } pub trait ContentValidator { type Error: fmt::Debug + fmt::Display; - fn validate_and_install(&self, app: PathBuf) -> Result; - fn done(&self, Option<&Manifest>); + fn validate_and_install(&self, app: PathBuf) -> Result<(String, LocalPageEndpoint), Self::Error>; + fn done(&self, Option); +} + +pub struct FetchControl { + abort: Arc, + listeners: Mutex)>>, + deadline: Instant, +} + +impl Default for FetchControl { + fn default() -> Self { + FetchControl { + abort: Arc::new(AtomicBool::new(false)), + listeners: Mutex::new(Vec::new()), + deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT), + } + } +} + +impl FetchControl { + fn notify FetchState>(&self, status: F) { + let mut listeners = self.listeners.lock(); + for (control, sender) in listeners.drain(..) 
{ + if let Err(e) = sender.send(status()) { + trace!(target: "dapps", "Waiting listener notification failed: {:?}", e); + } else { + let _ = control.ready(Next::read()); + } + } + } + + fn set_status(&self, status: &FetchState) { + match *status { + FetchState::Error(ref handler) => self.notify(|| FetchState::Error(handler.clone())), + FetchState::Done(ref id, ref endpoint, ref handler) => self.notify(|| FetchState::Done(id.clone(), endpoint.clone(), handler.clone())), + FetchState::NotStarted(_) | FetchState::InProgress(_) => {}, + } + } + + pub fn abort(&self) { + self.abort.store(true, Ordering::SeqCst); + } + + pub fn to_handler(&self, control: Control) -> Box + Send> { + let (tx, rx) = mpsc::channel(); + self.listeners.lock().push((control, tx)); + + Box::new(WaitingHandler { + receiver: rx, + state: None, + }) + } +} + +pub struct WaitingHandler { + receiver: mpsc::Receiver, + state: Option, +} + +impl server::Handler for WaitingHandler { + fn on_request(&mut self, _request: server::Request) -> Next { + Next::wait() + } + + fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { + self.state = self.receiver.try_recv().ok(); + Next::write() + } + + fn on_response(&mut self, res: &mut server::Response) -> Next { + match self.state { + Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response(res), + Some(FetchState::Error(ref mut handler)) => handler.on_response(res), + _ => Next::end(), + } + } + + fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { + match self.state { + Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response_writable(encoder), + Some(FetchState::Error(ref mut handler)) => handler.on_response_writable(encoder), + _ => Next::end(), + } + } } pub struct ContentFetcherHandler { - abort: Arc, + fetch_control: Arc, control: Option, status: FetchState, client: Option, using_dapps_domains: bool, - dapp: H, + installer: H, } impl Drop for ContentFetcherHandler { fn drop(&mut self) { - let manifest = 
match self.status { - FetchState::Done(ref manifest) => Some(manifest), + let result = match self.status { + FetchState::Done(_, ref result, _) => Some(result.clone()), _ => None, }; - self.dapp.done(manifest); + self.installer.done(result); } } impl ContentFetcherHandler { pub fn new( - app: GithubApp, - abort: Arc, + url: String, control: Control, using_dapps_domains: bool, - handler: H) -> Self { + handler: H) -> (Self, Arc) { + let fetch_control = Arc::new(FetchControl::default()); let client = Client::new(); - ContentFetcherHandler { - abort: abort, + let handler = ContentFetcherHandler { + fetch_control: fetch_control.clone(), control: Some(control), client: Some(client), - status: FetchState::NotStarted(app), + status: FetchState::NotStarted(url), using_dapps_domains: using_dapps_domains, - dapp: handler, - } + installer: handler, + }; + + (handler, fetch_control) } fn close_client(client: &mut Option) { @@ -96,9 +178,8 @@ impl ContentFetcherHandler { .close(); } - - fn fetch_app(client: &mut Client, app: &GithubApp, abort: Arc, control: Control) -> Result, String> { - client.request(app.url(), abort, Box::new(move || { + fn fetch_content(client: &mut Client, url: &str, abort: Arc, control: Control) -> Result, String> { + client.request(url, abort, Box::new(move || { trace!(target: "dapps", "Fetching finished."); // Ignoring control errors let _ = control.ready(Next::read()); @@ -108,19 +189,16 @@ impl ContentFetcherHandler { impl server::Handler for ContentFetcherHandler { fn on_request(&mut self, request: server::Request) -> Next { - let status = if let FetchState::NotStarted(ref app) = self.status { + let status = if let FetchState::NotStarted(ref url) = self.status { Some(match *request.method() { // Start fetching content Method::Get => { - trace!(target: "dapps", "Fetching dapp: {:?}", app); + trace!(target: "dapps", "Fetching content from: {:?}", url); let control = self.control.take().expect("on_request is called only once, thus control is always 
Some"); let client = self.client.as_mut().expect("on_request is called before client is closed."); - let fetch = Self::fetch_app(client, app, self.abort.clone(), control); + let fetch = Self::fetch_content(client, url, self.fetch_control.abort.clone(), control); match fetch { - Ok(receiver) => FetchState::InProgress { - deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT), - receiver: receiver, - }, + Ok(receiver) => FetchState::InProgress(receiver), Err(e) => FetchState::Error(ContentHandler::error( StatusCode::BadGateway, "Unable To Start Dapp Download", @@ -140,6 +218,7 @@ impl server::Handler for ContentFetcherHandler< } else { None }; if let Some(status) = status { + self.fetch_control.set_status(&status); self.status = status; } @@ -149,49 +228,51 @@ impl server::Handler for ContentFetcherHandler< fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { let (status, next) = match self.status { // Request may time out - FetchState::InProgress { ref deadline, .. } if *deadline < Instant::now() => { + FetchState::InProgress(_) if self.fetch_control.deadline < Instant::now() => { trace!(target: "dapps", "Fetching dapp failed because of timeout."); let timeout = ContentHandler::error( StatusCode::GatewayTimeout, "Download Timeout", - &format!("Could not fetch dapp bundle within {} seconds.", FETCH_TIMEOUT), + &format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT), None ); Self::close_client(&mut self.client); (Some(FetchState::Error(timeout)), Next::write()) }, - FetchState::InProgress { ref receiver, .. } => { + FetchState::InProgress(ref receiver) => { // Check if there is an answer let rec = receiver.try_recv(); match rec { // Unpack and validate Ok(Ok(path)) => { - trace!(target: "dapps", "Fetching dapp finished. Starting validation."); + trace!(target: "dapps", "Fetching content finished. 
Starting validation ({:?})", path); Self::close_client(&mut self.client); // Unpack and verify - let state = match self.dapp.validate_and_install(path.clone()) { + let state = match self.installer.validate_and_install(path.clone()) { Err(e) => { - trace!(target: "dapps", "Error while validating dapp: {:?}", e); + trace!(target: "dapps", "Error while validating content: {:?}", e); FetchState::Error(ContentHandler::error( StatusCode::BadGateway, "Invalid Dapp", - "Downloaded bundle does not contain a valid dapp.", + "Downloaded bundle does not contain a valid content.", Some(&format!("{:?}", e)) )) }, - Ok(manifest) => FetchState::Done(manifest) + Ok((id, result)) => { + let address = redirection_address(self.using_dapps_domains, &id); + FetchState::Done(id, result, Redirection::new(&address)) + }, }; // Remove temporary zip file - // TODO [todr] Uncomment me - // let _ = fs::remove_file(path); + let _ = fs::remove_file(path); (Some(state), Next::write()) }, Ok(Err(e)) => { - warn!(target: "dapps", "Unable to fetch new dapp: {:?}", e); + warn!(target: "dapps", "Unable to fetch content: {:?}", e); let error = ContentHandler::error( StatusCode::BadGateway, "Download Error", - "There was an error when fetching the dapp.", + "There was an error when fetching the content.", Some(&format!("{:?}", e)), ); (Some(FetchState::Error(error)), Next::write()) @@ -205,6 +286,7 @@ impl server::Handler for ContentFetcherHandler< }; if let Some(status) = status { + self.fetch_control.set_status(&status); self.status = status; } @@ -213,12 +295,7 @@ impl server::Handler for ContentFetcherHandler< fn on_response(&mut self, res: &mut server::Response) -> Next { match self.status { - FetchState::Done(ref manifest) => { - trace!(target: "dapps", "Fetching dapp finished. 
Redirecting to {}", manifest.id); - res.set_status(StatusCode::Found); - res.headers_mut().set(header::Location(redirection_address(self.using_dapps_domains, &manifest.id))); - Next::write() - }, + FetchState::Done(_, _, ref mut handler) => handler.on_response(res), FetchState::Error(ref mut handler) => handler.on_response(res), _ => Next::end(), } @@ -226,9 +303,9 @@ impl server::Handler for ContentFetcherHandler< fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { match self.status { + FetchState::Done(_, _, ref mut handler) => handler.on_response_writable(encoder), FetchState::Error(ref mut handler) => handler.on_response_writable(encoder), _ => Next::end(), } } } - diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index 6f6423b58..62b13eaa8 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -27,7 +27,7 @@ pub use self::auth::AuthRequiredHandler; pub use self::echo::EchoHandler; pub use self::content::ContentHandler; pub use self::redirect::Redirection; -pub use self::fetch::{ContentFetcherHandler, ContentValidator}; +pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl}; use url::Url; use hyper::{server, header, net, uri}; diff --git a/dapps/src/handlers/redirect.rs b/dapps/src/handlers/redirect.rs index 8b6158266..e43d32e24 100644 --- a/dapps/src/handlers/redirect.rs +++ b/dapps/src/handlers/redirect.rs @@ -20,15 +20,20 @@ use hyper::{header, server, Decoder, Encoder, Next}; use hyper::net::HttpStream; use hyper::status::StatusCode; +#[derive(Clone)] pub struct Redirection { to_url: String } impl Redirection { - pub fn new(url: &str) -> Box { - Box::new(Redirection { + pub fn new(url: &str) -> Self { + Redirection { to_url: url.to_owned() - }) + } + } + + pub fn boxed(url: &str) -> Box { + Box::new(Self::new(url)) } } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 87563a3ae..edc0bebe5 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -191,13 +191,16 @@ impl Server { ) 
-> Result { let panic_handler = Arc::new(Mutex::new(None)); let authorization = Arc::new(authorization); - let apps_fetcher = Arc::new(apps::fetcher::AppFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status)); + let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status)); let endpoints = Arc::new(apps::all_endpoints(dapps_path)); let special = Arc::new({ let mut special = HashMap::new(); special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, panic_handler.clone())); - special.insert(router::SpecialEndpoint::Api, api::RestApi::new(format!("{}", addr), endpoints.clone())); special.insert(router::SpecialEndpoint::Utils, apps::utils()); + special.insert( + router::SpecialEndpoint::Api, + api::RestApi::new(format!("{}", addr), endpoints.clone(), content_fetcher.clone()) + ); special }); let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); @@ -206,7 +209,7 @@ impl Server { .handle(move |ctrl| router::Router::new( ctrl, apps::main_page(), - apps_fetcher.clone(), + content_fetcher.clone(), endpoints.clone(), special.clone(), authorization.clone(), diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs index 86d4273d5..e34cc6434 100644 --- a/dapps/src/page/local.rs +++ b/dapps/src/page/local.rs @@ -17,20 +17,31 @@ use mime_guess; use std::io::{Seek, Read, SeekFrom}; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use page::handler; use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; +#[derive(Debug, Clone)] pub struct LocalPageEndpoint { path: PathBuf, - info: EndpointInfo, + mime: Option, + info: Option, } impl LocalPageEndpoint { pub fn new(path: PathBuf, info: EndpointInfo) -> Self { LocalPageEndpoint { path: path, - info: info, + mime: None, + info: Some(info), + } + } + + pub fn single_file(path: PathBuf, mime: String) -> Self { + LocalPageEndpoint { + path: path, + mime: Some(mime), + info: None, } } @@ -41,17 +52,40 @@ impl 
LocalPageEndpoint { impl Endpoint for LocalPageEndpoint { fn info(&self) -> Option<&EndpointInfo> { - Some(&self.info) + self.info.as_ref() } fn to_handler(&self, path: EndpointPath) -> Box { - Box::new(handler::PageHandler { - app: LocalDapp::new(self.path.clone()), - prefix: None, - path: path, - file: Default::default(), - safe_to_embed: false, - }) + if let Some(ref mime) = self.mime { + Box::new(handler::PageHandler { + app: LocalSingleFile { path: self.path.clone(), mime: mime.clone() }, + prefix: None, + path: path, + file: Default::default(), + safe_to_embed: false, + }) + } else { + Box::new(handler::PageHandler { + app: LocalDapp { path: self.path.clone() }, + prefix: None, + path: path, + file: Default::default(), + safe_to_embed: false, + }) + } + } +} + +struct LocalSingleFile { + path: PathBuf, + mime: String, +} + +impl handler::Dapp for LocalSingleFile { + type DappFile = LocalFile; + + fn file(&self, _path: &str) -> Option { + LocalFile::from_path(&self.path, Some(&self.mime)) } } @@ -59,14 +93,6 @@ struct LocalDapp { path: PathBuf, } -impl LocalDapp { - fn new(path: PathBuf) -> Self { - LocalDapp { - path: path - } - } -} - impl handler::Dapp for LocalDapp { type DappFile = LocalFile; @@ -75,18 +101,7 @@ impl handler::Dapp for LocalDapp { for part in file_path.split('/') { path.push(part); } - // Check if file exists - fs::File::open(path.clone()).ok().map(|file| { - let content_type = mime_guess::guess_mime_type(path); - let len = file.metadata().ok().map_or(0, |meta| meta.len()); - LocalFile { - content_type: content_type.to_string(), - buffer: [0; 4096], - file: file, - pos: 0, - len: len, - } - }) + LocalFile::from_path(&path, None) } } @@ -98,6 +113,24 @@ struct LocalFile { pos: u64, } +impl LocalFile { + fn from_path>(path: P, mime: Option<&str>) -> Option { + // Check if file exists + fs::File::open(&path).ok().map(|file| { + let content_type = mime.map(|mime| mime.to_owned()) + .unwrap_or_else(|| 
mime_guess::guess_mime_type(path).to_string()); + let len = file.metadata().ok().map_or(0, |meta| meta.len()); + LocalFile { + content_type: content_type, + buffer: [0; 4096], + file: file, + pos: 0, + len: len, + } + }) + } +} + impl handler::DappFile for LocalFile { fn content_type(&self) -> &str { &self.content_type diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index c93456d71..e3ff6e64f 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -27,7 +27,7 @@ use url::{Url, Host}; use hyper::{self, server, Next, Encoder, Decoder, Control, StatusCode}; use hyper::net::HttpStream; use apps; -use apps::fetcher::AppFetcher; +use apps::fetcher::ContentFetcher; use endpoint::{Endpoint, Endpoints, EndpointPath}; use handlers::{Redirection, extract_url, ContentHandler}; use self::auth::{Authorization, Authorized}; @@ -45,7 +45,7 @@ pub struct Router { control: Option, main_page: &'static str, endpoints: Arc, - fetch: Arc, + fetch: Arc, special: Arc>>, authorization: Arc, allowed_hosts: Option>, @@ -91,7 +91,7 @@ impl server::Handler for Router { (Some(ref path), _) if self.fetch.contains(&path.app_id) => { self.fetch.to_async_handler(path.clone(), control) }, - // Redirection to main page (maybe 404 instead?) + // 404 for non-existent content (Some(ref path), _) if *req.method() == hyper::method::Method::Get => { let address = apps::redirection_address(path.using_dapps_domains, self.main_page); Box::new(ContentHandler::error( @@ -104,7 +104,7 @@ impl server::Handler for Router { // Redirect any GET request to home. 
_ if *req.method() == hyper::method::Method::Get => { let address = apps::redirection_address(false, self.main_page); - Redirection::new(address.as_str()) + Redirection::boxed(address.as_str()) }, // RPC by default _ => { @@ -136,19 +136,19 @@ impl Router { pub fn new( control: Control, main_page: &'static str, - app_fetcher: Arc, + content_fetcher: Arc, endpoints: Arc, special: Arc>>, authorization: Arc, allowed_hosts: Option>, ) -> Self { - let handler = special.get(&SpecialEndpoint::Api).unwrap().to_handler(EndpointPath::default()); + let handler = special.get(&SpecialEndpoint::Utils).unwrap().to_handler(EndpointPath::default()); Router { control: Some(control), main_page: main_page, endpoints: endpoints, - fetch: app_fetcher, + fetch: content_fetcher, special: special, authorization: authorization, allowed_hosts: allowed_hosts, diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 4fbc37772..649d283ce 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -38,10 +38,6 @@ struct RpcEndpoint { } impl Endpoint for RpcEndpoint { - fn to_handler(&self, _path: EndpointPath) -> Box { - panic!("RPC Endpoint is asynchronous and requires Control object."); - } - fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { let panic_handler = PanicHandler { handler: self.panic_handler.clone() }; Box::new(ServerHandler::new( diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index ab0d33726..fc255ec20 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use tests::helpers::{serve, request}; +use tests::helpers::{serve, serve_with_registrar, request}; #[test] fn should_return_error() { @@ -82,3 +82,24 @@ fn should_handle_ping() { assert_eq!(response.body, "0\n\n".to_owned()); } + +#[test] +fn should_try_to_resolve_dapp() { + // given + let (server, registrar) = serve_with_registrar(); + + // when + let response = request(server, + "\ + GET /api/content/1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d HTTP/1.1\r\n\ + Host: home.parity\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + assert_eq!(registrar.calls.lock().len(), 2); +} + diff --git a/dapps/src/tests/helpers.rs b/dapps/src/tests/helpers.rs index 4cd21520c..efbd24a8d 100644 --- a/dapps/src/tests/helpers.rs +++ b/dapps/src/tests/helpers.rs @@ -17,7 +17,7 @@ use std::env; use std::str; use std::sync::Arc; -use rustc_serialize::hex::{ToHex, FromHex}; +use rustc_serialize::hex::FromHex; use ServerBuilder; use Server; diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs index 2fdecbd4d..1a360cd08 100644 --- a/dapps/src/tests/redirection.rs +++ b/dapps/src/tests/redirection.rs @@ -115,7 +115,7 @@ fn should_serve_rpc() { // then assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); + assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); } #[test] @@ -137,7 +137,7 @@ fn should_serve_rpc_at_slash_rpc() { // then assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); + assert_eq!(response.body, format!("58\n{}\n\n0\n\n", 
r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); } diff --git a/db/Cargo.toml b/db/Cargo.toml index c7cbba2ea..15ceb9b3b 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -11,7 +11,7 @@ build = "build.rs" ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} ethcore-devtools = { path = "../devtools" } ethcore-ipc = { path = "../ipc/rpc" } rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" } diff --git a/db/src/database.rs b/db/src/database.rs index 185618f99..9a52822f6 100644 --- a/db/src/database.rs +++ b/db/src/database.rs @@ -460,7 +460,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); client.put("xxx".as_bytes(), "1".as_bytes()).unwrap(); client.close().unwrap(); @@ -477,7 +477,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); client.put("xxx".as_bytes(), "1".as_bytes()).unwrap(); @@ -498,7 +498,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); assert!(client.get("xxx".as_bytes()).unwrap().is_none()); @@ -516,7 +516,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), 
url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); let transaction = DBTransaction::new(); @@ -541,7 +541,7 @@ mod client_tests { let stop = StopGuard::new(); run_worker(&scope, stop.share(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); let mut batch = Vec::new(); diff --git a/db/src/lib.rs.in b/db/src/lib.rs.in index 4fa43b977..54fccb097 100644 --- a/db/src/lib.rs.in +++ b/db/src/lib.rs.in @@ -66,13 +66,13 @@ pub fn extras_service_url(db_path: &str) -> Result { pub fn blocks_client(db_path: &str) -> Result { let url = try!(blocks_service_url(db_path)); - let client = try!(nanoipc::init_client::>(&url)); + let client = try!(nanoipc::generic_client::>(&url)); Ok(client) } pub fn extras_client(db_path: &str) -> Result { let url = try!(extras_service_url(db_path)); - let client = try!(nanoipc::init_client::>(&url)); + let client = try!(nanoipc::generic_client::>(&url)); Ok(client) } diff --git a/devtools/src/http_client.rs b/devtools/src/http_client.rs index 27fa6ec50..f194c4004 100644 --- a/devtools/src/http_client.rs +++ b/devtools/src/http_client.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::time::Duration; use std::io::{Read, Write}; use std::str::{self, Lines}; use std::net::{TcpStream, SocketAddr}; @@ -43,10 +44,11 @@ pub fn read_block(lines: &mut Lines, all: bool) -> String { pub fn request(address: &SocketAddr, request: &str) -> Response { let mut req = TcpStream::connect(address).unwrap(); + req.set_read_timeout(Some(Duration::from_secs(1))).unwrap(); req.write_all(request.as_bytes()).unwrap(); let mut response = String::new(); - req.read_to_string(&mut response).unwrap(); + let _ = req.read_to_string(&mut response); let mut lines = response.lines(); let status = lines.next().unwrap().to_owned(); diff --git a/devtools/src/random_path.rs b/devtools/src/random_path.rs index d58042512..9c6c261a2 100644 --- a/devtools/src/random_path.rs +++ b/devtools/src/random_path.rs @@ -23,7 +23,8 @@ use std::ops::{Deref, DerefMut}; use rand::random; pub struct RandomTempPath { - path: PathBuf + path: PathBuf, + pub panic_on_drop_failure: bool, } pub fn random_filename() -> String { @@ -39,7 +40,8 @@ impl RandomTempPath { let mut dir = env::temp_dir(); dir.push(random_filename()); RandomTempPath { - path: dir.clone() + path: dir.clone(), + panic_on_drop_failure: true, } } @@ -48,7 +50,8 @@ impl RandomTempPath { dir.push(random_filename()); fs::create_dir_all(dir.as_path()).unwrap(); RandomTempPath { - path: dir.clone() + path: dir.clone(), + panic_on_drop_failure: true, } } @@ -72,12 +75,20 @@ impl AsRef for RandomTempPath { self.as_path() } } +impl Deref for RandomTempPath { + type Target = Path; + fn deref(&self) -> &Self::Target { + self.as_path() + } +} impl Drop for RandomTempPath { fn drop(&mut self) { if let Err(_) = fs::remove_dir_all(&self) { if let Err(e) = fs::remove_file(&self) { - panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e); + if self.panic_on_drop_failure { + panic!("Failed to remove temp directory. 
Here's what prevented this from happening: ({})", e); + } } } } diff --git a/docker/ubuntu-aarch64/Dockerfile b/docker/ubuntu-aarch64/Dockerfile index aae09f71c..1f4159a54 100644 --- a/docker/ubuntu-aarch64/Dockerfile +++ b/docker/ubuntu-aarch64/Dockerfile @@ -23,15 +23,9 @@ RUN rustup target add aarch64-unknown-linux-gnu # show backtraces ENV RUST_BACKTRACE 1 -# set compilers -ENV CXX aarch64-linux-gnu-g++ -ENV CC aarch64-linux-gnu-gcc - # show tools RUN rustc -vV && \ - cargo -V && \ - gcc -v &&\ - g++ -v + cargo -V # build parity RUN git clone https://github.com/ethcore/parity && \ diff --git a/docker/ubuntu-arm/Dockerfile b/docker/ubuntu-arm/Dockerfile index 54a54ad55..6c2fa2852 100644 --- a/docker/ubuntu-arm/Dockerfile +++ b/docker/ubuntu-arm/Dockerfile @@ -23,15 +23,10 @@ RUN rustup target add armv7-unknown-linux-gnueabihf # show backtraces ENV RUST_BACKTRACE 1 -# set compilers -ENV CXX arm-linux-gnueabihf-g++ -ENV CC arm-linux-gnueabihf-gcc # show tools RUN rustc -vV && \ - cargo -V && \ - gcc -v &&\ - g++ -v + cargo -V # build parity RUN git clone https://github.com/ethcore/parity && \ diff --git a/ethash/src/compute.rs b/ethash/src/compute.rs index bb297d709..a99a0e3b5 100644 --- a/ethash/src/compute.rs +++ b/ethash/src/compute.rs @@ -91,7 +91,7 @@ pub struct Light { seed_compute: Mutex, } -/// Light cache structur +/// Light cache structure impl Light { /// Create a new light cache for a given block number pub fn new(block_number: u64) -> Light { @@ -134,16 +134,27 @@ impl Light { }) } - pub fn to_file(&self) -> io::Result<()> { + pub fn to_file(&self) -> io::Result { let seed_compute = self.seed_compute.lock(); let path = Light::file_path(seed_compute.get_seedhash(self.block_number)); + + if self.block_number >= ETHASH_EPOCH_LENGTH * 2 { + let deprecated = Light::file_path( + seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2)); + + if deprecated.exists() { + debug!(target: "ethash", "removing: {:?}", &deprecated); + 
try!(fs::remove_file(deprecated)); + } + } + try!(fs::create_dir_all(path.parent().unwrap())); - let mut file = try!(File::create(path)); + let mut file = try!(File::create(&path)); let cache_size = self.cache.len() * NODE_BYTES; let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) }; try!(file.write(buf)); - Ok(()) + Ok(path) } } @@ -455,3 +466,18 @@ fn test_seed_compute_after_newer() { let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162]; assert_eq!(seed_compute.get_seedhash(486382), hash); } + +#[test] +fn test_drop_old_data() { + let first = Light::new(0).to_file().unwrap(); + + let second = Light::new(ETHASH_EPOCH_LENGTH).to_file().unwrap(); + assert!(fs::metadata(&first).is_ok()); + + let _ = Light::new(ETHASH_EPOCH_LENGTH * 2).to_file(); + assert!(fs::metadata(&first).is_err()); + assert!(fs::metadata(&second).is_ok()); + + let _ = Light::new(ETHASH_EPOCH_LENGTH * 3).to_file(); + assert!(fs::metadata(&second).is_err()); +} diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index fe6a682cb..3ad9e69c4 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -20,12 +20,12 @@ num_cpus = "0.2" crossbeam = "0.2.9" lazy_static = "0.2" bloomchain = "0.1" -rayon = "0.3.1" +rayon = "0.4.2" semver = "0.2" bit-set = "0.4" time = "0.1" evmjit = { path = "../evmjit", optional = true } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} ethash = { path = "../ethash" } ethcore-util = { path = "../util" } ethcore-io = { path = "../util/io" } diff --git a/ethcore/src/account_provider.rs b/ethcore/src/account_provider.rs index c2379d09e..851d015ba 100644 --- a/ethcore/src/account_provider.rs +++ b/ethcore/src/account_provider.rs @@ -322,6 +322,26 @@ impl AccountProvider { Ok(signature) } + /// Decrypts a message. Account must be unlocked. 
+ pub fn decrypt(&self, account: Address, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let data = { + let mut unlocked = self.unlocked.lock(); + let data = try!(unlocked.get(&account).ok_or(Error::NotUnlocked)).clone(); + if let Unlock::Temp = data.unlock { + unlocked.remove(&account).expect("data exists: so key must exist: qed"); + } + if let Unlock::Timed((ref start, ref duration)) = data.unlock { + if start.elapsed() > Duration::from_millis(*duration as u64) { + unlocked.remove(&account).expect("data exists: so key must exist: qed"); + return Err(Error::NotUnlocked); + } + } + data + }; + + Ok(try!(self.sstore.decrypt(&account, &data.password, shared_mac, message))) + } + /// Unlocks an account, signs the message, and locks it again. pub fn sign_with_password(&self, account: Address, password: String, message: Message) -> Result { let signature = try!(self.sstore.sign(&account, &password, &message)); diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 0b42d6838..b35b4dc1a 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -205,7 +205,6 @@ pub struct ClosedBlock { block: ExecutedBlock, uncle_bytes: Bytes, last_hashes: Arc, - unclosed_state: State, } /// Just like `ClosedBlock` except that we can't reopen it and it's faster. @@ -343,18 +342,19 @@ impl<'x> OpenBlock<'x> { } } - /// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles. + /// Turn this into a `ClosedBlock`. pub fn close(self) -> ClosedBlock { let mut s = self; - let unclosed_state = s.block.state.clone(); + // take a snapshot so the engine's changes can be rolled back. 
+ s.block.state.snapshot(); s.engine.on_close_block(&mut s.block); - s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()))); let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); s.block.base.header.set_state_root(s.block.state.root().clone()); - s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()))); s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); @@ -362,33 +362,37 @@ impl<'x> OpenBlock<'x> { block: s.block, uncle_bytes: uncle_bytes, last_hashes: s.last_hashes, - unclosed_state: unclosed_state, } } - /// Turn this into a `LockedBlock`. A BlockChain must be provided in order to figure out the uncles. + /// Turn this into a `LockedBlock`. pub fn close_and_lock(self) -> LockedBlock { let mut s = self; + // take a snapshot so the engine's changes can be rolled back. 
+ s.block.state.snapshot(); + s.engine.on_close_block(&mut s.block); if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP { - s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()))); } let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); if s.block.base.header.uncles_hash().is_zero() { s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); } if s.block.base.header.receipts_root().is_zero() || s.block.base.header.receipts_root() == &SHA3_NULL_RLP { - s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()))); } + s.block.base.header.set_state_root(s.block.state.root().clone()); s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); - LockedBlock { + ClosedBlock { block: s.block, uncle_bytes: uncle_bytes, - } + last_hashes: s.last_hashes, + }.lock() } } @@ -409,7 +413,17 @@ impl ClosedBlock { pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } /// Turn this into a `LockedBlock`, unable to be reopened again. - pub fn lock(self) -> LockedBlock { + pub fn lock(mut self) -> LockedBlock { + // finalize the changes made by the engine. 
+ self.block.state.clear_snapshot(); + if let Err(e) = self.block.state.commit() { + warn!("Error committing closed block's state: {:?}", e); + } + + // set the state root here, after commit recalculates with the block + // rewards. + self.block.base.header.set_state_root(self.block.state.root().clone()); + LockedBlock { block: self.block, uncle_bytes: self.uncle_bytes, @@ -417,12 +431,12 @@ impl ClosedBlock { } /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen<'a>(self, engine: &'a Engine) -> OpenBlock<'a> { + pub fn reopen(mut self, engine: &Engine) -> OpenBlock { // revert rewards (i.e. set state back at last transaction's state). - let mut block = self.block; - block.state = self.unclosed_state; + self.block.state.revert_snapshot(); + OpenBlock { - block: block, + block: self.block, engine: engine, last_hashes: self.last_hashes, } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index ef26a6b08..392581fd1 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -23,6 +23,7 @@ use header::*; use super::extras::*; use transaction::*; use views::*; +use log_entry::{LogEntry, LocalizedLogEntry}; use receipt::Receipt; use blooms::{Bloom, BloomGroup}; use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; @@ -127,6 +128,10 @@ pub trait BlockProvider { /// Returns numbers of blocks containing given bloom. fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec; + + /// Returns logs matching given filter. 
+ fn logs(&self, mut blocks: Vec, matches: F, limit: Option) -> Vec + where F: Fn(&LogEntry) -> bool, Self: Sized; } #[derive(Debug, Hash, Eq, PartialEq, Clone)] @@ -315,6 +320,51 @@ impl BlockProvider for BlockChain { .map(|b| b as BlockNumber) .collect() } + + fn logs(&self, mut blocks: Vec, matches: F, limit: Option) -> Vec + where F: Fn(&LogEntry) -> bool, Self: Sized { + // sort in reverse order + blocks.sort_by(|a, b| b.cmp(a)); + + let mut log_index = 0; + let mut logs = blocks.into_iter() + .filter_map(|number| self.block_hash(number).map(|hash| (number, hash))) + .filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts))) + .filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) + .flat_map(|(number, hash, mut receipts, hashes)| { + assert_eq!(receipts.len(), hashes.len()); + log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len()); + + let receipts_len = receipts.len(); + receipts.reverse(); + receipts.into_iter() + .map(|receipt| receipt.logs) + .zip(hashes) + .enumerate() + .flat_map(move |(index, (mut logs, tx_hash))| { + let current_log_index = log_index; + log_index -= logs.len(); + + logs.reverse(); + logs.into_iter() + .enumerate() + .map(move |(i, log)| LocalizedLogEntry { + entry: log, + block_hash: hash, + block_number: number, + transaction_hash: tx_hash, + // iterating in reverse order + transaction_index: receipts_len - index - 1, + log_index: current_log_index - i - 1, + }) + }) + }) + .filter(|log_entry| matches(&log_entry.entry)) + .take(limit.unwrap_or(::std::usize::MAX)) + .collect::>(); + logs.reverse(); + logs + } } pub struct AncestryIter<'a> { @@ -1160,6 +1210,7 @@ mod tests { use blockchain::extras::TransactionAddress; use views::BlockView; use transaction::{Transaction, Action}; + use log_entry::{LogEntry, LocalizedLogEntry}; fn new_db(path: &str) -> Arc { 
Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap()) @@ -1235,7 +1286,7 @@ mod tests { let bc = BlockChain::new(Config::default(), &genesis, db.clone()); let mut block_hashes = vec![genesis_hash.clone()]; - let mut batch =db.transaction(); + let mut batch = db.transaction(); for _ in 0..10 { let block = canon_chain.generate(&mut finalizer).unwrap(); block_hashes.push(BlockView::new(&block).header_view().sha3()); @@ -1566,7 +1617,7 @@ mod tests { let mut block_header = bc.block_header(&best_hash); while !block_header.is_none() { - block_header = bc.block_header(&block_header.unwrap().parent_hash()); + block_header = bc.block_header(block_header.unwrap().parent_hash()); } assert!(bc.cache_size().blocks > 1024 * 1024); @@ -1612,13 +1663,134 @@ mod tests { } fn insert_block(db: &Arc, bc: &BlockChain, bytes: &[u8], receipts: Vec) -> ImportRoute { - let mut batch =db.transaction(); + let mut batch = db.transaction(); let res = bc.insert_block(&mut batch, bytes, receipts); db.write(batch).unwrap(); bc.commit(); res } + #[test] + fn test_logs() { + // given + let mut canon_chain = ChainGenerator::default(); + let mut finalizer = BlockFinalizer::default(); + let genesis = canon_chain.generate(&mut finalizer).unwrap(); + // just insert dummy transaction so that #transactions=#receipts + let t1 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), + }.sign(&"".sha3()); + let t2 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), + }.sign(&"".sha3()); + let t3 = Transaction { + nonce: 0.into(), + gas_price: 0.into(), + gas: 100_000.into(), + action: Action::Create, + value: 100.into(), + data: 
"601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(), + }.sign(&"".sha3()); + let tx_hash1 = t1.hash(); + let tx_hash2 = t2.hash(); + let tx_hash3 = t3.hash(); + let b1 = canon_chain.with_transaction(t1).with_transaction(t2).generate(&mut finalizer).unwrap(); + let b2 = canon_chain.with_transaction(t3).generate(&mut finalizer).unwrap(); + + let temp = RandomTempPath::new(); + let db = new_db(temp.as_str()); + let bc = BlockChain::new(Config::default(), &genesis, db.clone()); + insert_block(&db, &bc, &b1, vec![Receipt { + state_root: H256::default(), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![ + LogEntry { address: Default::default(), topics: vec![], data: vec![1], }, + LogEntry { address: Default::default(), topics: vec![], data: vec![2], }, + ], + }, + Receipt { + state_root: H256::default(), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![ + LogEntry { address: Default::default(), topics: vec![], data: vec![3], }, + ], + }]); + insert_block(&db, &bc, &b2, vec![ + Receipt { + state_root: H256::default(), + gas_used: 10_000.into(), + log_bloom: Default::default(), + logs: vec![ + LogEntry { address: Default::default(), topics: vec![], data: vec![4], }, + ], + } + ]); + + // when + let block1 = BlockView::new(&b1); + let block2 = BlockView::new(&b2); + let logs1 = bc.logs(vec![1, 2], |_| true, None); + let logs2 = bc.logs(vec![1, 2], |_| true, Some(1)); + + // then + assert_eq!(logs1, vec![ + LocalizedLogEntry { + entry: LogEntry { address: Default::default(), topics: vec![], data: vec![1] }, + block_hash: block1.hash(), + block_number: block1.header().number(), + transaction_hash: tx_hash1.clone(), + transaction_index: 0, + log_index: 0, + }, + LocalizedLogEntry { + entry: LogEntry { address: Default::default(), topics: vec![], data: vec![2] }, + block_hash: block1.hash(), + block_number: block1.header().number(), + transaction_hash: tx_hash1.clone(), + transaction_index: 0, + 
log_index: 1, + }, + LocalizedLogEntry { + entry: LogEntry { address: Default::default(), topics: vec![], data: vec![3] }, + block_hash: block1.hash(), + block_number: block1.header().number(), + transaction_hash: tx_hash2.clone(), + transaction_index: 1, + log_index: 2, + }, + LocalizedLogEntry { + entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] }, + block_hash: block2.hash(), + block_number: block2.header().number(), + transaction_hash: tx_hash3.clone(), + transaction_index: 0, + log_index: 0, + } + ]); + assert_eq!(logs2, vec![ + LocalizedLogEntry { + entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] }, + block_hash: block2.hash(), + block_number: block2.header().number(), + transaction_hash: tx_hash3.clone(), + transaction_index: 0, + log_index: 0, + } + ]); + } + #[test] fn test_bloom_filter_simple() { // TODO: From here diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index 2825a2a12..e8eb0ed68 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -17,14 +17,15 @@ use crypto::sha2::Sha256 as Sha256Digest; use crypto::ripemd160::Ripemd160 as Ripemd160Digest; use crypto::digest::Digest; -use util::*; +use std::cmp::min; +use util::{U256, H256, Hashable, FixedHash, BytesRef}; use ethkey::{Signature, recover as ec_recover}; use ethjson; /// Native implementation of a built-in contract. pub trait Impl: Send + Sync { /// execute this built-in on the given input, writing to the given output. - fn execute(&self, input: &[u8], out: &mut [u8]); + fn execute(&self, input: &[u8], output: &mut BytesRef); } /// A gas pricing scheme for built-in contracts. @@ -56,7 +57,7 @@ impl Builtin { pub fn cost(&self, s: usize) -> U256 { self.pricer.cost(s) } /// Simple forwarder for execute. 
- pub fn execute(&self, input: &[u8], output: &mut[u8]) { self.native.execute(input, output) } + pub fn execute(&self, input: &[u8], output: &mut BytesRef) { self.native.execute(input, output) } } impl From for Builtin { @@ -108,14 +109,13 @@ struct Sha256; struct Ripemd160; impl Impl for Identity { - fn execute(&self, input: &[u8], output: &mut [u8]) { - let len = min(input.len(), output.len()); - output[..len].copy_from_slice(&input[..len]); + fn execute(&self, input: &[u8], output: &mut BytesRef) { + output.write(0, input); } } impl Impl for EcRecover { - fn execute(&self, i: &[u8], output: &mut [u8]) { + fn execute(&self, i: &[u8], output: &mut BytesRef) { let len = min(i.len(), 128); let mut input = [0; 128]; @@ -135,58 +135,34 @@ impl Impl for EcRecover { if s.is_valid() { if let Ok(p) = ec_recover(&s, &hash) { let r = p.sha3(); - - let out_len = min(output.len(), 32); - - for x in &mut output[0.. min(12, out_len)] { - *x = 0; - } - - if out_len > 12 { - output[12..out_len].copy_from_slice(&r[12..out_len]); - } + output.write(0, &[0; 12]); + output.write(12, &r[12..r.len()]); } } } } impl Impl for Sha256 { - fn execute(&self, input: &[u8], output: &mut [u8]) { - let out_len = min(output.len(), 32); - + fn execute(&self, input: &[u8], output: &mut BytesRef) { let mut sha = Sha256Digest::new(); sha.input(input); - if out_len == 32 { - sha.result(&mut output[0..32]); - } else { - let mut out = [0; 32]; - sha.result(&mut out); + let mut out = [0; 32]; + sha.result(&mut out); - output.copy_from_slice(&out[..out_len]) - } + output.write(0, &out); } } impl Impl for Ripemd160 { - fn execute(&self, input: &[u8], output: &mut [u8]) { - let out_len = min(output.len(), 32); - + fn execute(&self, input: &[u8], output: &mut BytesRef) { let mut sha = Ripemd160Digest::new(); sha.input(input); - for x in &mut output[0.. 
min(12, out_len)] { - *x = 0; - } + let mut out = [0; 32]; + sha.result(&mut out[12..32]); - if out_len >= 32 { - sha.result(&mut output[12..32]); - } else if out_len > 12 { - let mut out = [0; 20]; - sha.result(&mut out); - - output.copy_from_slice(&out[12..out_len]) - } + output.write(0, &out); } } @@ -194,7 +170,7 @@ impl Impl for Ripemd160 { mod tests { use super::{Builtin, Linear, ethereum_builtin, Pricer}; use ethjson; - use util::U256; + use util::{U256, BytesRef}; #[test] fn identity() { @@ -203,15 +179,15 @@ mod tests { let i = [0u8, 1, 2, 3]; let mut o2 = [255u8; 2]; - f.execute(&i[..], &mut o2[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o2[..])); assert_eq!(i[0..2], o2); let mut o4 = [255u8; 4]; - f.execute(&i[..], &mut o4[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o4[..])); assert_eq!(i, o4); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(i, o8[..4]); assert_eq!([255u8; 4], o8[4..]); } @@ -224,16 +200,20 @@ mod tests { let i = [0u8; 0]; let mut o = [255u8; 32]; - f.execute(&i[..], &mut o[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]); let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut o34[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])); assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]); + + let mut ov = vec![]; + f.execute(&i[..], &mut BytesRef::Flexible(&mut ov)); + assert_eq!(&ov[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]); } #[test] @@ -244,15 +224,15 @@ mod tests { let i = [0u8; 
0]; let mut o = [255u8; 32]; - f.execute(&i[..], &mut o[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut o34[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])); assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]); } @@ -272,46 +252,46 @@ mod tests { let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let mut o = [255u8; 32]; - f.execute(&i[..], &mut o[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut o34[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])); assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let mut o = 
[255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = 
FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); // TODO: Should this (corrupted version of the above) fail rather than returning some address? /* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/ } @@ -336,7 +316,7 @@ mod tests { let i = [0u8, 1, 2, 3]; let mut o = [255u8; 4]; - b.execute(&i[..], &mut o[..]); + b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(i, o); } @@ -357,7 +337,7 @@ mod tests { let i = [0u8, 1, 2, 3]; let mut o = [255u8; 4]; - b.execute(&i[..], &mut o[..]); + b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(i, o); } -} \ No newline at end of file +} diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 36da0dcef..0e662aa77 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -145,7 +145,9 @@ pub struct Client { factories: Factories, } -const HISTORY: u64 = 1200; +/// The pruning constant -- how old blocks must be before we +/// assume finality of a given candidate. 
+pub const HISTORY: u64 = 1200; /// Append a path element to the given path and return the string. pub fn append_path

(path: P, item: &str) -> String where P: AsRef { @@ -169,7 +171,7 @@ impl Client { let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database))); let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); - let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()))); + let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())); let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) { @@ -674,6 +676,8 @@ impl Client { impl snapshot::DatabaseRestore for Client { /// Restart the client with a new backend fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> { + trace!(target: "snapshot", "Replacing client database with {:?}", new_db); + let _import_lock = self.import_lock.lock(); let mut state_db = self.state_db.write(); let mut chain = self.chain.write(); @@ -684,7 +688,7 @@ impl snapshot::DatabaseRestore for Client { *state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE); *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); - *tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from)); + *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()); Ok(()) } } @@ -957,9 +961,7 @@ impl BlockChainClient for Client { } fn logs(&self, filter: Filter) -> Vec { - // TODO: lock blockchain only once - - let mut blocks = filter.bloom_possibilities().iter() + let blocks = filter.bloom_possibilities().iter() .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) .flat_map(|m| m) // remove duplicate elements @@ -967,35 +969,7 @@ impl BlockChainClient for Client { .into_iter() .collect::>(); - blocks.sort(); - - let chain = self.chain.read(); - blocks.into_iter() - 
.filter_map(|number| chain.block_hash(number).map(|hash| (number, hash))) - .filter_map(|(number, hash)| chain.block_receipts(&hash).map(|r| (number, hash, r.receipts))) - .filter_map(|(number, hash, receipts)| chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes()))) - .flat_map(|(number, hash, receipts, hashes)| { - let mut log_index = 0; - receipts.into_iter() - .enumerate() - .flat_map(|(index, receipt)| { - log_index += receipt.logs.len(); - receipt.logs.into_iter() - .enumerate() - .filter(|tuple| filter.matches(&tuple.1)) - .map(|(i, log)| LocalizedLogEntry { - entry: log, - block_hash: hash.clone(), - block_number: number, - transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::default), - transaction_index: index, - log_index: log_index + i - }) - .collect::>() - }) - .collect::>() - }) - .collect() + self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit) } fn filter_traces(&self, filter: TraceFilter) -> Option> { diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index bb70de6cd..0146293df 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -18,7 +18,7 @@ use std::str::FromStr; pub use std::time::Duration; pub use block_queue::BlockQueueConfig; pub use blockchain::Config as BlockChainConfig; -pub use trace::{Config as TraceConfig, Switch}; +pub use trace::Config as TraceConfig; pub use evm::VMType; pub use verification::VerifierType; use util::{journaldb, CompactionProfile}; @@ -102,7 +102,7 @@ pub struct ClientConfig { /// State db compaction profile pub db_compaction: DatabaseCompactionProfile, /// Should db have WAL enabled? - pub db_wal: bool, + pub db_wal: bool, /// Operating mode pub mode: Mode, /// Type of block verifier used by client. 
diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 32582ddf2..a5ff89c47 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -23,7 +23,7 @@ mod trace; mod client; pub use self::client::*; -pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType}; +pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType}; pub use self::error::Error; pub use types::ids::*; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 1de82abfe..2e5b9365d 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -67,6 +67,8 @@ pub struct TestBlockChainClient { pub execution_result: RwLock>>, /// Transaction receipts. pub receipts: RwLock>, + /// Logs + pub logs: RwLock>, /// Block queue size. pub queue_size: AtomicUsize, /// Miner @@ -114,6 +116,7 @@ impl TestBlockChainClient { code: RwLock::new(HashMap::new()), execution_result: RwLock::new(None), receipts: RwLock::new(HashMap::new()), + logs: RwLock::new(Vec::new()), queue_size: AtomicUsize::new(0), miner: Arc::new(Miner::with_spec(&spec)), spec: spec, @@ -165,6 +168,11 @@ impl TestBlockChainClient { *self.latest_block_timestamp.write() = ts; } + /// Set logs to return for each logs call. + pub fn set_logs(&self, logs: Vec) { + *self.logs.write() = logs; + } + /// Add blocks to test client. 
pub fn add_blocks(&self, count: usize, with: EachBlockWith) { let len = self.numbers.read().len(); @@ -390,8 +398,13 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn logs(&self, _filter: Filter) -> Vec { - unimplemented!(); + fn logs(&self, filter: Filter) -> Vec { + let mut logs = self.logs.read().clone(); + let len = logs.len(); + match filter.limit { + Some(limit) if limit <= len => logs.split_off(len - limit), + _ => logs, + } } fn last_hashes(&self) -> LastHashes { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 368bae7d6..fa2b56afe 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -218,8 +218,11 @@ pub trait BlockChainClient : Sync + Send { /// Extended client interface used for mining pub trait MiningBlockChainClient : BlockChainClient { /// Returns OpenBlock prepared for closing. - fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) - -> OpenBlock; + fn prepare_open_block(&self, + author: Address, + gas_range_target: (U256, U256), + extra_data: Bytes + ) -> OpenBlock; /// Returns EvmFactory. fn vm_factory(&self) -> &EvmFactory; diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 18dfeec46..8d852e99e 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -99,6 +99,10 @@ impl Engine for BasicAuthority { /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). fn on_close_block(&self, _block: &mut ExecutedBlock) {} + fn is_sealer(&self, author: &Address) -> Option { + Some(self.our_params.authorities.contains(author)) + } + /// Attempt to seal the block internally. 
/// /// This operation is synchronous and may (quite reasonably) not be available, in which `false` will @@ -257,4 +261,14 @@ mod tests { let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap(); assert!(b.try_seal(engine, seal).is_ok()); } + + #[test] + fn seals_internally() { + let tap = AccountProvider::transient_provider(); + let authority = tap.insert_account("".sha3(), "").unwrap(); + + let engine = new_test_authority().engine; + assert!(!engine.is_sealer(&Address::default()).unwrap()); + assert!(engine.is_sealer(&authority).unwrap()); + } } diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 3c95f3465..26d2ed5bf 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -58,6 +58,8 @@ impl Engine for InstantSeal { Schedule::new_homestead() } + fn is_sealer(&self, _author: &Address) -> Option { Some(true) } + fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option> { Some(Vec::new()) } @@ -71,18 +73,12 @@ mod tests { use spec::Spec; use block::*; - /// Create a new test chain spec with `BasicAuthority` consensus engine. 
- fn new_test_instant() -> Spec { - let bytes: &[u8] = include_bytes!("../../res/instant_seal.json"); - Spec::load(bytes).expect("invalid chain spec") - } - #[test] fn instant_can_seal() { let tap = AccountProvider::transient_provider(); let addr = tap.insert_account("".sha3(), "").unwrap(); - let spec = new_test_instant(); + let spec = Spec::new_test_instant(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); let mut db_result = get_temp_journal_db(); @@ -98,7 +94,7 @@ mod tests { #[test] fn instant_cant_verify() { - let engine = new_test_instant().engine; + let engine = Spec::new_test_instant().engine; let mut header: Header = Header::default(); assert!(engine.verify_block_basic(&header, None).is_ok()); diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index fe0674b09..ff8f13ebd 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -30,7 +30,7 @@ pub use self::tendermint::Tendermint; pub use self::signed_vote::SignedVote; pub use self::propose_collect::ProposeCollect; -use common::{HashMap, SemanticVersion, Header, EnvInfo, Address, Builtin, BTreeMap, U256, Bytes, SignedTransaction, Error, H520}; +use common::*; use rlp::UntrustedRlp; use account_provider::AccountProvider; use block::ExecutedBlock; @@ -69,7 +69,7 @@ pub trait Engine : Sync + Send { fn extra_info(&self, _header: &Header) -> HashMap { HashMap::new() } /// Additional information. - fn additional_params(&self) -> HashMap { HashMap::new() } + fn additional_params(&self) -> HashMap { HashMap::new() } /// Get the general parameters of the chain. fn params(&self) -> &CommonParams; @@ -95,6 +95,11 @@ pub trait Engine : Sync + Send { /// Block transformation functions, after the transactions. fn on_close_block(&self, _block: &mut ExecutedBlock) {} + /// If Some(true) this author is able to generate seals, generate_seal has to be implemented. + /// None indicates that this Engine never seals internally regardless of author (e.g. PoW). 
+ fn is_sealer(&self, _author: &Address) -> Option { None } + /// Checks if default address is able to seal. + fn is_default_sealer(&self) -> Option { self.is_sealer(&Default::default()) } /// Attempt to seal the block internally. /// /// If `Some` is returned, then you get a valid seal. @@ -149,7 +154,7 @@ pub trait Engine : Sync + Send { fn cost_of_builtin(&self, a: &Address, input: &[u8]) -> U256 { self.builtins().get(a).unwrap().cost(input.len()) } /// Execution the builtin contract `a` on `input` and return `output`. /// Panics if `is_builtin(a)` is not true. - fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut [u8]) { self.builtins().get(a).unwrap().execute(input, output); } + fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut BytesRef) { self.builtins().get(a).unwrap().execute(input, output); } // TODO: sealing stuff - though might want to leave this for later. } diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 4d25f812a..734acb758 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -160,16 +160,14 @@ impl Engine for Ethash { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(&fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len()))); + fields.state.add_balance(fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len()))); // Bestow uncle rewards let current_number = fields.header.number(); for u in fields.uncles.iter() { fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8))); } - if let Err(e) = fields.state.commit() { - warn!("Encountered error on state commit: {}", e); - } + } fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs index 813819250..8d2202480 100644 --- a/ethcore/src/evm/evm.rs +++ 
b/ethcore/src/evm/evm.rs @@ -113,7 +113,10 @@ impl<'a> Finalize for Result> { } /// Cost calculation type. For low-gas usage we calculate costs using usize instead of U256 -pub trait CostType: ops::Mul + ops::Div + ops::Add + ops::Sub + ops::Shr + ops::Shl + cmp::Ord + Sized + From + Copy { +pub trait CostType: Sized + From + Copy + + ops::Mul + ops::Div + ops::Add +ops::Sub + + ops::Shr + ops::Shl + + cmp::Ord + fmt::Debug { /// Converts this cost into `U256` fn as_u256(&self) -> U256; /// Tries to fit `U256` into this `Cost` type diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs index 0db789507..2bbc7035b 100644 --- a/ethcore/src/evm/ext.rs +++ b/ethcore/src/evm/ext.rs @@ -83,6 +83,9 @@ pub trait Ext { /// Returns code at given address fn extcode(&self, address: &Address) -> Bytes; + /// Returns code size at given address + fn extcodesize(&self, address: &Address) -> usize; + /// Creates log entry with given topics and data fn log(&mut self, topics: Vec, data: &[u8]); diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index d1b9b18bc..ad2d5cd34 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -53,6 +53,17 @@ fn color(instruction: Instruction, name: &'static str) -> String { type CodePosition = usize; type ProgramCounter = usize; +const ONE: U256 = U256([1, 0, 0, 0]); +const TWO: U256 = U256([2, 0, 0, 0]); +const TWO_POW_5: U256 = U256([0x20, 0, 0, 0]); +const TWO_POW_8: U256 = U256([0x100, 0, 0, 0]); +const TWO_POW_16: U256 = U256([0x10000, 0, 0, 0]); +const TWO_POW_24: U256 = U256([0x1000000, 0, 0, 0]); +const TWO_POW_64: U256 = U256([0, 0x1, 0, 0]); // 0x1 00000000 00000000 +const TWO_POW_96: U256 = U256([0, 0x100000000, 0, 0]); //0x1 00000000 00000000 00000000 +const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 00000000 
00000000 00000000 00000000 00000000 00000000 000000 + /// Abstraction over raw vector of Bytes. Easier state management of PC. struct CodeReader<'a> { position: ProgramCounter, @@ -126,7 +137,7 @@ impl evm::Evm for Interpreter { gasometer.current_gas = gasometer.current_gas - gas_cost; evm_debug!({ - println!("[0x{:x}][{}(0x{:x}) Gas: {:x}\n Gas Before: {:x}", + println!("[0x{:x}][{}(0x{:x}) Gas: {:?}\n Gas Before: {:?}", reader.position, color(instruction, info.name), instruction, @@ -471,7 +482,7 @@ impl Interpreter { }, instructions::EXTCODESIZE => { let address = u256_to_address(&stack.pop_back()); - let len = ext.extcode(&address).len(); + let len = ext.extcodesize(&address); stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { @@ -599,7 +610,19 @@ impl Interpreter { let a = stack.pop_back(); let b = stack.pop_back(); stack.push(if !self.is_zero(&b) { - a.overflowing_div(b).0 + match b { + ONE => a, + TWO => a >> 1, + TWO_POW_5 => a >> 5, + TWO_POW_8 => a >> 8, + TWO_POW_16 => a >> 16, + TWO_POW_24 => a >> 24, + TWO_POW_64 => a >> 64, + TWO_POW_96 => a >> 96, + TWO_POW_224 => a >> 224, + TWO_POW_248 => a >> 248, + _ => a.overflowing_div(b).0, + } } else { U256::zero() }); diff --git a/ethcore/src/evm/jit.rs b/ethcore/src/evm/jit.rs index 4f43d327b..c62f87ab7 100644 --- a/ethcore/src/evm/jit.rs +++ b/ethcore/src/evm/jit.rs @@ -18,6 +18,7 @@ use common::*; use evmjit; use evm::{self, GasLeft}; +use types::executed::CallType; /// Should be used to convert jit types to ethcore trait FromJit: Sized { @@ -77,10 +78,11 @@ impl IntoJit for U256 { impl IntoJit for H256 { fn into_jit(self) -> evmjit::I256 { let mut ret = [0; 4]; - for i in 0..self.bytes().len() { - let rev = self.bytes().len() - 1 - i; + let len = self.len(); + for i in 0..len { + let rev = len - 1 - i; let pos = rev / 8; - ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8); + ret[pos] += (self[i] as u64) << ((rev % 8) * 8); } evmjit::I256 { words: ret } } @@ -206,6 +208,7 @@ impl<'a> 
evmjit::Ext for ExtAdapter<'a> { let sender_address = unsafe { Address::from_jit(&*sender_address) }; let receive_address = unsafe { Address::from_jit(&*receive_address) }; let code_address = unsafe { Address::from_jit(&*code_address) }; + // TODO Is it always safe in case of DELEGATE_CALL? let transfer_value = unsafe { U256::from_jit(&*transfer_value) }; let value = Some(transfer_value); @@ -239,6 +242,12 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { } } + // TODO [ToDr] Any way to detect DelegateCall? + let call_type = match is_callcode { + true => CallType::CallCode, + false => CallType::Call, + }; + match self.ext.call( &call_gas, &sender_address, @@ -246,7 +255,9 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { value, unsafe { slice::from_raw_parts(in_beg, in_size as usize) }, &code_address, - unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) { + unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }, + call_type, + ) { evm::MessageCallResult::Success(gas_left) => unsafe { *io_gas = (gas + gas_left).low_u64(); true diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index bdb1f1ddb..ec217b6c5 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -140,6 +140,10 @@ impl Ext for FakeExt { self.codes.get(address).unwrap_or(&Bytes::new()).clone() } + fn extcodesize(&self, address: &Address) -> usize { + self.codes.get(address).map(|v| v.len()).unwrap_or(0) + } + fn log(&mut self, topics: Vec, data: &[u8]) { self.logs.push(FakeLogEntry { topics: topics, diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 332eda190..8f8b534ee 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -193,7 +193,6 @@ impl<'a> Executive<'a> { data: Some(t.data.clone()), call_type: CallType::Call, }; - // TODO: move output upstream let mut out = vec![]; (self.call(params, &mut substate, BytesRef::Flexible(&mut out), &mut tracer, &mut vm_tracer), out) } diff --git a/ethcore/src/externalities.rs 
b/ethcore/src/externalities.rs index 09c4b4e11..7395522c3 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -205,6 +205,11 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT self.state.code(address).unwrap_or_else(|| vec![]) } + fn extcodesize(&self, address: &Address) -> usize { + self.state.code_size(address).unwrap_or(0) + } + + #[cfg_attr(feature="dev", allow(match_ref_pats))] fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result where Self: Sized { diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 7304f5931..1fe98acdb 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -131,6 +131,10 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer { self.ext.extcode(address) } + fn extcodesize(&self, address: &Address) -> usize { + self.ext.extcodesize(address) + } + fn log(&mut self, topics: Vec, data: &[u8]) { self.ext.log(topics, data) } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index c9d60f075..182234280 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -24,7 +24,7 @@ use views::{BlockView, HeaderView}; use state::State; use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics}; use executive::contract_address; -use block::{ClosedBlock, IsBlock, Block}; +use block::{ClosedBlock, SealedBlock, IsBlock, Block}; use error::*; use transaction::{Action, SignedTransaction}; use receipt::{Receipt, RichReceipt}; @@ -34,6 +34,7 @@ use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, Transac use miner::work_notify::WorkPoster; use client::TransactionImportResult; use miner::price_info::PriceInfo; +use header::BlockNumber; /// Different possible definitions for pending transaction set. 
#[derive(Debug, PartialEq)] @@ -165,6 +166,7 @@ struct SealingWork { } /// Keeps track of transactions using priority queue and holds currently mined block. +/// Handles preparing work for "work sealing" or seals "internally" if Engine does not require work. pub struct Miner { // NOTE [ToDr] When locking always lock in this order! transaction_queue: Arc>, @@ -173,6 +175,7 @@ pub struct Miner { sealing_block_last_request: Mutex, // for sealing... options: MinerOptions, + seals_internally: bool, gas_range_target: RwLock<(U256, U256)>, author: RwLock

, @@ -185,33 +188,24 @@ pub struct Miner { } impl Miner { - /// Creates new instance of miner without accounts, but with given spec. - pub fn with_spec(spec: &Spec) -> Miner { - Miner { - transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())), - options: Default::default(), - next_allowed_reseal: Mutex::new(Instant::now()), - sealing_block_last_request: Mutex::new(0), - sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(20), enabled: false}), - gas_range_target: RwLock::new((U256::zero(), U256::zero())), - author: RwLock::new(Address::default()), - extra_data: RwLock::new(Vec::new()), - accounts: None, - engine: spec.engine.clone(), - work_poster: None, - gas_pricer: Mutex::new(GasPricer::new_fixed(20_000_000_000u64.into())), - } - } - - /// Creates new instance of miner - pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option>) -> Arc { - let work_poster = if !options.new_work_notify.is_empty() { Some(WorkPoster::new(&options.new_work_notify)) } else { None }; + /// Creates new instance of miner. 
+ fn new_raw(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option>) -> Miner { + let work_poster = match options.new_work_notify.is_empty() { + true => None, + false => Some(WorkPoster::new(&options.new_work_notify)) + }; let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit))); - Arc::new(Miner { + Miner { transaction_queue: txq, next_allowed_reseal: Mutex::new(Instant::now()), sealing_block_last_request: Mutex::new(0), - sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(options.work_queue_size), enabled: options.force_sealing || !options.new_work_notify.is_empty()}), + sealing_work: Mutex::new(SealingWork{ + queue: UsingQueue::new(options.work_queue_size), + enabled: options.force_sealing + || !options.new_work_notify.is_empty() + || spec.engine.is_default_sealer().unwrap_or(false) + }), + seals_internally: spec.engine.is_default_sealer().is_some(), gas_range_target: RwLock::new((U256::zero(), U256::zero())), author: RwLock::new(Address::default()), extra_data: RwLock::new(Vec::new()), @@ -220,7 +214,17 @@ impl Miner { engine: spec.engine.clone(), work_poster: work_poster, gas_pricer: Mutex::new(gas_pricer), - }) + } + } + + /// Creates new instance of miner without accounts, but with given spec. + pub fn with_spec(spec: &Spec) -> Miner { + Miner::new_raw(Default::default(), GasPricer::new_fixed(20_000_000_000u64.into()), spec, None) + } + + /// Creates new instance of a miner Arc. + pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option>) -> Arc { + Arc::new(Miner::new_raw(options, gas_pricer, spec, accounts)) } fn forced_sealing(&self) -> bool { @@ -242,20 +246,17 @@ impl Miner { self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone()) } - /// Prepares new block for sealing including top transactions from queue. 
#[cfg_attr(feature="dev", allow(match_same_arms))] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] - fn prepare_sealing(&self, chain: &MiningBlockChainClient) { - trace!(target: "miner", "prepare_sealing: entering"); - + /// Prepares new block for sealing including top transactions from queue. + fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option) { { - trace!(target: "miner", "recalibrating..."); + trace!(target: "miner", "prepare_block: recalibrating..."); let txq = self.transaction_queue.clone(); self.gas_pricer.lock().recalibrate(move |price| { - trace!(target: "miner", "Got gas price! {}", price); + trace!(target: "miner", "prepare_block: Got gas price! {}", price); txq.lock().set_minimal_gas_price(price); }); - trace!(target: "miner", "done recalibration."); + trace!(target: "miner", "prepare_block: done recalibration."); } let (transactions, mut open_block, original_work_hash) = { @@ -273,13 +274,13 @@ impl Miner { */ let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) { Some(old_block) => { - trace!(target: "miner", "Already have previous work; updating and returning"); + trace!(target: "miner", "prepare_block: Already have previous work; updating and returning"); // add transactions to old_block old_block.reopen(&*self.engine) } None => { // block not found - create it. - trace!(target: "miner", "No existing work - making new block"); + trace!(target: "miner", "prepare_block: No existing work - making new block"); chain.prepare_open_block( self.author(), (self.gas_floor_target(), self.gas_ceil_target()), @@ -291,6 +292,7 @@ impl Miner { }; let mut invalid_transactions = HashSet::new(); + let mut transactions_to_penalize = HashSet::new(); let block_number = open_block.block().fields().header.number(); // TODO: push new uncles, too. 
for tx in transactions { @@ -298,6 +300,12 @@ impl Miner { match open_block.push_transaction(tx, None) { Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas })) => { debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas); + + // Penalize transaction if it's above current gas limit + if gas > gas_limit { + transactions_to_penalize.insert(hash); + } + // Exit early if gas left is smaller then min_tx_gas let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly. if gas_limit - gas_used < min_tx_gas { @@ -333,38 +341,83 @@ impl Miner { for hash in invalid_transactions.into_iter() { queue.remove_invalid(&hash, &fetch_account); } - } - - if !block.transactions().is_empty() { - trace!(target: "miner", "prepare_sealing: block has transaction - attempting internal seal."); - // block with transactions - see if we can seal immediately. - let s = self.engine.generate_seal(block.block(), match self.accounts { - Some(ref x) => Some(&**x), - None => None, - }); - if let Some(seal) = s { - trace!(target: "miner", "prepare_sealing: managed internal seal. importing..."); - if let Ok(sealed) = block.lock().try_seal(&*self.engine, seal) { - if let Ok(_) = chain.import_block(sealed.rlp_bytes()) { - trace!(target: "miner", "prepare_sealing: sealed internally and imported. leaving."); - } else { - warn!("prepare_sealing: ERROR: could not import internally sealed block. WTF?"); - } - } else { - warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?"); - } - return; - } else { - trace!(target: "miner", "prepare_sealing: unable to generate seal internally"); + for hash in transactions_to_penalize { + queue.penalize(&hash); } } + (block, original_work_hash) + } + /// Check is reseal is allowed and necessary. 
+ fn requires_reseal(&self, best_block: BlockNumber) -> bool { + let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); + let mut sealing_work = self.sealing_work.lock(); + if sealing_work.enabled { + trace!(target: "miner", "requires_reseal: sealing enabled"); + let last_request = *self.sealing_block_last_request.lock(); + let should_disable_sealing = !self.forced_sealing() + && !has_local_transactions + && best_block > last_request + && best_block - last_request > SEALING_TIMEOUT_IN_BLOCKS; + + trace!(target: "miner", "requires_reseal: should_disable_sealing={}; best_block={}, last_request={}", should_disable_sealing, best_block, last_request); + + if should_disable_sealing { + trace!(target: "miner", "Miner sleeping (current {}, last {})", best_block, last_request); + sealing_work.enabled = false; + sealing_work.queue.reset(); + false + } else { + // sealing enabled and we don't want to sleep. + *self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; + true + } + } else { + trace!(target: "miner", "requires_reseal: sealing is disabled"); + false + } + } + + /// Attempts to perform internal sealing (one that does not require work) to return Ok(sealed), + /// Err(Some(block)) returns for unsuccesful sealing while Err(None) indicates misspecified engine. + fn seal_block_internally(&self, block: ClosedBlock) -> Result> { + trace!(target: "miner", "seal_block_internally: block has transaction - attempting internal seal."); + let s = self.engine.generate_seal(block.block(), match self.accounts { + Some(ref x) => Some(&**x), + None => None, + }); + if let Some(seal) = s { + trace!(target: "miner", "seal_block_internally: managed internal seal. importing..."); + block.lock().try_seal(&*self.engine, seal).or_else(|_| { + warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. 
WTF?"); + Err(None) + }) + } else { + trace!(target: "miner", "seal_block_internally: unable to generate seal internally"); + Err(Some(block)) + } + } + + /// Uses Engine to seal the block internally and then imports it to chain. + fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool { + if !block.transactions().is_empty() { + if let Ok(sealed) = self.seal_block_internally(block) { + if chain.import_block(sealed.rlp_bytes()).is_ok() { + return true + } + } + } + false + } + + /// Prepares work which has to be done to seal. + fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option) { let (work, is_new) = { let mut sealing_work = self.sealing_work.lock(); let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash()); - trace!(target: "miner", "Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash()); + trace!(target: "miner", "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash()); let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) { - trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); + trace!(target: "miner", "prepare_work: Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); let pow_hash = block.block().fields().header.hash(); let number = block.block().fields().header.number(); let difficulty = *block.block().fields().header.difficulty(); @@ -378,7 +431,7 @@ impl Miner { } else { (None, false) }; - trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash())); + trace!(target: "miner", "prepare_work: leaving (last={:?})", 
sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash())); (work, is_new) }; if is_new { @@ -392,13 +445,13 @@ impl Miner { queue.set_gas_limit(gas_limit); } - /// Returns true if we had to prepare new pending block - fn enable_and_prepare_sealing(&self, chain: &MiningBlockChainClient) -> bool { - trace!(target: "miner", "enable_and_prepare_sealing: entering"); + /// Returns true if we had to prepare new pending block. + fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool { + trace!(target: "miner", "prepare_work_sealing: entering"); let prepare_new = { let mut sealing_work = self.sealing_work.lock(); let have_work = sealing_work.queue.peek_last_ref().is_some(); - trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work); + trace!(target: "miner", "prepare_work_sealing: have_work={}", have_work); if !have_work { sealing_work.enabled = true; true @@ -411,12 +464,13 @@ impl Miner { // | NOTE Code below requires transaction_queue and sealing_work locks. | // | Make sure to release the locks before calling that method. 
| // -------------------------------------------------------------------------- - self.prepare_sealing(chain); + let (block, original_work_hash) = self.prepare_block(chain); + self.prepare_work(block, original_work_hash); } let mut sealing_block_last_request = self.sealing_block_last_request.lock(); let best_number = chain.chain_info().best_block_number; if *sealing_block_last_request != best_number { - trace!(target: "miner", "enable_and_prepare_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number); + trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number); *sealing_block_last_request = best_number; } @@ -537,6 +591,10 @@ impl MinerService for Miner { } fn set_author(&self, author: Address) { + if self.seals_internally { + let mut sealing_work = self.sealing_work.lock(); + sealing_work.enabled = self.engine.is_sealer(&author).unwrap_or(false); + } *self.author.write() = author; } @@ -625,6 +683,7 @@ impl MinerService for Miner { results } + #[cfg_attr(feature="dev", allow(collapsible_if))] fn import_own_transaction( &self, chain: &MiningBlockChainClient, @@ -635,7 +694,7 @@ impl MinerService for Miner { trace!(target: "own_tx", "Importing transaction: {:?}", transaction); let imported = { - // Be sure to release the lock before we call enable_and_prepare_sealing + // Be sure to release the lock before we call prepare_work_sealing let mut transaction_queue = self.transaction_queue.lock(); let import = self.add_transactions_to_queue( chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue @@ -661,11 +720,11 @@ impl MinerService for Miner { // -------------------------------------------------------------------------- if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() { // Make sure to do it after transaction is imported and lock is droped. 
- // We need to create pending block and enable sealing - let prepared = self.enable_and_prepare_sealing(chain); - // If new block has not been prepared (means we already had one) - // we need to update sealing - if !prepared { + // We need to create pending block and enable sealing. + if self.seals_internally || !self.prepare_work_sealing(chain) { + // If new block has not been prepared (means we already had one) + // or Engine might be able to seal internally, + // we need to update sealing. self.update_sealing(chain); } } @@ -767,44 +826,26 @@ impl MinerService for Miner { self.transaction_queue.lock().last_nonce(address) } + + /// Update sealing if required. + /// Prepare the block and work if the Engine does not seal internally. fn update_sealing(&self, chain: &MiningBlockChainClient) { trace!(target: "miner", "update_sealing"); - let requires_reseal = { - let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); - let mut sealing_work = self.sealing_work.lock(); - if sealing_work.enabled { - trace!(target: "miner", "update_sealing: sealing enabled"); - let current_no = chain.chain_info().best_block_number; - let last_request = *self.sealing_block_last_request.lock(); - let should_disable_sealing = !self.forced_sealing() - && !has_local_transactions - && current_no > last_request - && current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS; - trace!(target: "miner", "update_sealing: should_disable_sealing={}; current_no={}, last_request={}", should_disable_sealing, current_no, last_request); - - if should_disable_sealing { - trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request); - sealing_work.enabled = false; - sealing_work.queue.reset(); - false - } else { - // sealing enabled and we don't want to sleep. - *self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; - true - } - } else { - // sealing is disabled. 
- false - } - }; - - if requires_reseal { + if self.requires_reseal(chain.chain_info().best_block_number) { // -------------------------------------------------------------------------- // | NOTE Code below requires transaction_queue and sealing_work locks. | // | Make sure to release the locks before calling that method. | // -------------------------------------------------------------------------- - self.prepare_sealing(chain); + trace!(target: "miner", "update_sealing: preparing a block"); + let (block, original_work_hash) = self.prepare_block(chain); + if self.seals_internally { + trace!(target: "miner", "update_sealing: engine indicates internal sealing"); + self.seal_and_import_block_internally(chain, block); + } else { + trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work"); + self.prepare_work(block, original_work_hash); + } } } @@ -814,7 +855,7 @@ impl MinerService for Miner { fn map_sealing_work(&self, chain: &MiningBlockChainClient, f: F) -> Option where F: FnOnce(&ClosedBlock) -> T { trace!(target: "miner", "map_sealing_work: entering"); - self.enable_and_prepare_sealing(chain); + self.prepare_work_sealing(chain); trace!(target: "miner", "map_sealing_work: sealing prepared"); let mut sealing_work = self.sealing_work.lock(); let ret = sealing_work.queue.use_last_ref(); @@ -917,11 +958,12 @@ mod tests { use super::*; use util::*; use ethkey::{Generator, Random}; - use client::{TestBlockChainClient, EachBlockWith}; - use client::{TransactionImportResult}; - use types::transaction::{Transaction, Action}; + use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult}; + use header::BlockNumber; + use types::transaction::{Transaction, SignedTransaction, Action}; use block::*; use spec::Spec; + use tests::helpers::{generate_dummy_client}; #[test] fn should_prepare_block_to_seal() { @@ -975,23 +1017,24 @@ mod tests { )).ok().expect("Miner was just created.") } + fn transaction() -> 
SignedTransaction { + let keypair = Random.generate().unwrap(); + Transaction { + action: Action::Create, + value: U256::zero(), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::zero(), + nonce: U256::zero(), + }.sign(keypair.secret()) + } + #[test] fn should_make_pending_block_when_importing_own_transaction() { // given let client = TestBlockChainClient::default(); let miner = miner(); - let transaction = { - let keypair = Random.generate().unwrap(); - Transaction { - action: Action::Create, - value: U256::zero(), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::zero(), - }.sign(keypair.secret()) - }; - + let transaction = transaction(); // when let res = miner.import_own_transaction(&client, transaction); @@ -1002,7 +1045,7 @@ mod tests { assert_eq!(miner.pending_transactions_hashes().len(), 1); assert_eq!(miner.pending_receipts().len(), 1); // This method will let us know if pending block was created (before calling that method) - assert_eq!(miner.enable_and_prepare_sealing(&client), false); + assert!(!miner.prepare_work_sealing(&client)); } #[test] @@ -1010,18 +1053,7 @@ mod tests { // given let client = TestBlockChainClient::default(); let miner = miner(); - let transaction = { - let keypair = Random.generate().unwrap(); - Transaction { - action: Action::Create, - value: U256::zero(), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::zero(), - }.sign(keypair.secret()) - }; - + let transaction = transaction(); // when let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap(); @@ -1032,6 +1064,41 @@ mod tests { assert_eq!(miner.pending_transactions().len(), 0); assert_eq!(miner.pending_receipts().len(), 0); // This method will let us know if pending block was created (before calling that method) - assert_eq!(miner.enable_and_prepare_sealing(&client), true); + 
assert!(miner.prepare_work_sealing(&client)); + } + + #[test] + fn should_not_seal_unless_enabled() { + let miner = miner(); + let client = TestBlockChainClient::default(); + // By default resealing is not required. + assert!(!miner.requires_reseal(1u8.into())); + + miner.import_external_transactions(&client, vec![transaction()]).pop().unwrap().unwrap(); + assert!(miner.prepare_work_sealing(&client)); + // Unless asked to prepare work. + assert!(miner.requires_reseal(1u8.into())); + } + + #[test] + fn internal_seals_without_work() { + let miner = Miner::with_spec(&Spec::new_test_instant()); + + let c = generate_dummy_client(2); + let client = c.reference().as_ref(); + + assert_eq!(miner.import_external_transactions(client, vec![transaction()]).pop().unwrap().unwrap(), TransactionImportResult::Current); + + miner.update_sealing(client); + client.flush_queue(); + assert!(miner.pending_block().is_none()); + assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); + + assert_eq!(miner.import_own_transaction(client, transaction()).unwrap(), TransactionImportResult::Current); + + miner.update_sealing(client); + client.flush_queue(); + assert!(miner.pending_block().is_none()); + assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber); } } diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index fb00a8aa8..7db65eacb 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -81,6 +81,7 @@ //! - It removes all transactions (either from `current` or `future`) with nonce < client nonce //! 
- It moves matching `future` transactions to `current` +use std::ops::Deref; use std::cmp::Ordering; use std::cmp; use std::collections::{HashSet, HashMap, BTreeSet, BTreeMap}; @@ -133,6 +134,8 @@ struct TransactionOrder { hash: H256, /// Origin of the transaction origin: TransactionOrigin, + /// Penalties + penalties: usize, } @@ -143,6 +146,7 @@ impl TransactionOrder { gas_price: tx.transaction.gas_price, hash: tx.hash(), origin: tx.origin, + penalties: 0, } } @@ -150,6 +154,11 @@ impl TransactionOrder { self.nonce_height = nonce - base_nonce; self } + + fn penalize(mut self) -> Self { + self.penalties = self.penalties.saturating_add(1); + self + } } impl Eq for TransactionOrder {} @@ -166,6 +175,11 @@ impl PartialOrd for TransactionOrder { impl Ord for TransactionOrder { fn cmp(&self, b: &TransactionOrder) -> Ordering { + // First check number of penalties + if self.penalties != b.penalties { + return self.penalties.cmp(&b.penalties); + } + // First check nonce_height if self.nonce_height != b.nonce_height { return self.nonce_height.cmp(&b.nonce_height); @@ -215,7 +229,48 @@ impl VerifiedTransaction { } fn sender(&self) -> Address { - self.transaction.sender().unwrap() + self.transaction.sender().expect("Sender is verified in new; qed") + } +} + +#[derive(Debug, Default)] +struct GasPriceQueue { + backing: BTreeMap>, +} + +impl GasPriceQueue { + /// Insert an item into a BTreeMap/HashSet "multimap". + pub fn insert(&mut self, gas_price: U256, hash: H256) -> bool { + self.backing.entry(gas_price).or_insert_with(Default::default).insert(hash) + } + + /// Remove an item from a BTreeMap/HashSet "multimap". + /// Returns true if the item was removed successfully. + pub fn remove(&mut self, gas_price: &U256, hash: &H256) -> bool { + if let Some(mut hashes) = self.backing.get_mut(gas_price) { + let only_one_left = hashes.len() == 1; + if !only_one_left { + // Operation may be ok: only if hash is in gas-price's Set. 
+ return hashes.remove(hash); + } + if hash != hashes.iter().next().expect("We know there is only one element in collection, tested above; qed") { + // Operation failed: hash not the single item in gas-price's Set. + return false; + } + } else { + // Operation failed: gas-price not found in Map. + return false; + } + // Operation maybe ok: only if hash not found in gas-price Set. + self.backing.remove(gas_price).is_some() + } +} + +impl Deref for GasPriceQueue { + type Target=BTreeMap>; + + fn deref(&self) -> &Self::Target { + &self.backing } } @@ -227,7 +282,7 @@ impl VerifiedTransaction { struct TransactionSet { by_priority: BTreeSet, by_address: Table, - by_gas_price: BTreeMap>, + by_gas_price: GasPriceQueue, limit: usize, } @@ -245,12 +300,12 @@ impl TransactionSet { // If transaction was replaced remove it from priority queue if let Some(ref old_order) = by_address_replaced { assert!(self.by_priority.remove(old_order), "hash is in `by_address`; all transactions in `by_address` must be in `by_priority`; qed"); - assert!(Self::remove_item(&mut self.by_gas_price, &old_order.gas_price, &old_order.hash), + assert!(self.by_gas_price.remove(&old_order.gas_price, &old_order.hash), "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed"); } - Self::insert_item(&mut self.by_gas_price, order_gas_price, order_hash); - debug_assert_eq!(self.by_priority.len(), self.by_address.len()); - debug_assert_eq!(self.by_gas_price.iter().map(|(_, v)| v.len()).fold(0, |a, b| a + b), self.by_address.len()); + self.by_gas_price.insert(order_gas_price, order_hash); + assert_eq!(self.by_priority.len(), self.by_address.len()); + assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len()); by_address_replaced } @@ -263,6 +318,7 @@ impl TransactionSet { if len <= self.limit { return None; } + let to_drop : Vec<(Address, U256)> = { self.by_priority .iter() @@ -290,13 +346,16 @@ impl TransactionSet { /// 
Drop transaction from this set (remove from `by_priority` and `by_address`) fn drop(&mut self, sender: &Address, nonce: &U256) -> Option { if let Some(tx_order) = self.by_address.remove(sender, nonce) { - assert!(Self::remove_item(&mut self.by_gas_price, &tx_order.gas_price, &tx_order.hash), + assert!(self.by_gas_price.remove(&tx_order.gas_price, &tx_order.hash), "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed"); - self.by_priority.remove(&tx_order); + assert!(self.by_priority.remove(&tx_order), + "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_priority`; qed"); assert_eq!(self.by_priority.len(), self.by_address.len()); + assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len()); return Some(tx_order); } assert_eq!(self.by_priority.len(), self.by_address.len()); + assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len()); None } @@ -304,7 +363,7 @@ impl TransactionSet { fn clear(&mut self) { self.by_priority.clear(); self.by_address.clear(); - self.by_gas_price.clear(); + self.by_gas_price.backing.clear(); } /// Sets new limit for number of transactions in this `TransactionSet`. @@ -321,32 +380,6 @@ impl TransactionSet { _ => U256::default(), } } - - /// Insert an item into a BTreeMap/HashSet "multimap". - fn insert_item(into: &mut BTreeMap>, gas_price: U256, hash: H256) -> bool { - into.entry(gas_price).or_insert_with(Default::default).insert(hash) - } - - /// Remove an item from a BTreeMap/HashSet "multimap". - /// Returns true if the item was removed successfully. - fn remove_item(from: &mut BTreeMap>, gas_price: &U256, hash: &H256) -> bool { - if let Some(mut hashes) = from.get_mut(gas_price) { - let only_one_left = hashes.len() == 1; - if !only_one_left { - // Operation may be ok: only if hash is in gas-price's Set. 
- return hashes.remove(hash); - } - if hashes.iter().next().unwrap() != hash { - // Operation failed: hash not the single item in gas-price's Set. - return false; - } - } else { - // Operation failed: gas-price not found in Map. - return false; - } - // Operation maybe ok: only if hash not found in gas-price Set. - from.remove(gas_price).is_some() - } } #[derive(Debug)] @@ -367,7 +400,7 @@ pub struct AccountDetails { } /// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue. -const GAS_LIMIT_HYSTERESIS: usize = 10; // % +const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) % /// `TransactionQueue` implementation pub struct TransactionQueue { @@ -486,8 +519,6 @@ impl TransactionQueue { pub fn add(&mut self, tx: SignedTransaction, fetch_account: &T, origin: TransactionOrigin) -> Result where T: Fn(&Address) -> AccountDetails { - trace!(target: "txqueue", "Importing: {:?}", tx.hash()); - if tx.gas_price < self.minimal_gas_price && origin != TransactionOrigin::Local { trace!(target: "txqueue", "Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})", @@ -573,6 +604,39 @@ impl TransactionQueue { assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len()); } + /// Penalize transactions from sender of transaction with given hash. + /// I.e. it should change the priority of the transaction in the queue. + /// + /// NOTE: We need to penalize all transactions from particular sender + /// to avoid breaking invariants in queue (ordered by nonces). + /// Consecutive transactions from this sender would fail otherwise (because of invalid nonce). 
+ pub fn penalize(&mut self, transaction_hash: &H256) { + let transaction = match self.by_hash.get(transaction_hash) { + None => return, + Some(t) => t, + }; + let sender = transaction.sender(); + + // Penalize all transactions from this sender + let nonces_from_sender = match self.current.by_address.row(&sender) { + Some(row_map) => row_map.keys().cloned().collect::>(), + None => vec![], + }; + for k in nonces_from_sender { + let order = self.current.drop(&sender, &k).unwrap(); + self.current.insert(sender, k, order.penalize()); + } + // Same thing for future + let nonces_from_sender = match self.future.by_address.row(&sender) { + Some(row_map) => row_map.keys().cloned().collect::>(), + None => vec![], + }; + for k in nonces_from_sender { + let order = self.future.drop(&sender, &k).unwrap(); + self.current.insert(sender, k, order.penalize()); + } + } + /// Removes invalid transaction identified by hash from queue. /// Assumption is that this transaction nonce is not related to client nonce, /// so transactions left in queue are processed according to client nonce. 
@@ -588,7 +652,7 @@ impl TransactionQueue { return; } - let transaction = transaction.unwrap(); + let transaction = transaction.expect("None is tested in early-exit condition above; qed"); let sender = transaction.sender(); let nonce = transaction.nonce(); let current_nonce = fetch_account(&sender).nonce; @@ -623,7 +687,7 @@ impl TransactionQueue { None => vec![], }; for k in all_nonces_from_sender { - let order = self.future.drop(sender, &k).unwrap(); + let order = self.future.drop(sender, &k).expect("iterating over a collection that has been retrieved above; qed"); if k >= current_nonce { self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { @@ -644,7 +708,8 @@ impl TransactionQueue { for k in all_nonces_from_sender { // Goes to future or is removed - let order = self.current.drop(sender, &k).unwrap(); + let order = self.current.drop(sender, &k).expect("iterating over a collection that has been retrieved above; + qed"); if k >= current_nonce { self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { @@ -704,10 +769,11 @@ impl TransactionQueue { if let None = by_nonce { return; } - let mut by_nonce = by_nonce.unwrap(); + let mut by_nonce = by_nonce.expect("None is tested in early-exit condition above; qed"); while let Some(order) = by_nonce.remove(¤t_nonce) { - // remove also from priority and hash + // remove also from priority and gas_price self.future.by_priority.remove(&order); + self.future.by_gas_price.remove(&order.gas_price, &order.hash); // Put to current let order = order.update_height(current_nonce, first_nonce); self.current.insert(address, current_nonce, order); @@ -742,6 +808,7 @@ impl TransactionQueue { let address = tx.sender(); let nonce = tx.nonce(); + let hash = tx.hash(); let next_nonce = self.last_nonces .get(&address) @@ -763,6 +830,9 @@ impl TransactionQueue { try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash))); // Return an error if this 
transaction is not imported because of limit. try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash))); + + debug!(target: "txqueue", "Importing transaction to future: {:?}", hash); + debug!(target: "txqueue", "status: {:?}", self.status()); return Ok(TransactionImportResult::Future); } try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash))); @@ -789,7 +859,8 @@ impl TransactionQueue { // Trigger error if the transaction we are importing was removed. try!(check_if_removed(&address, &nonce, removed)); - trace!(target: "txqueue", "status: {:?}", self.status()); + debug!(target: "txqueue", "Imported transaction to current: {:?}", hash); + debug!(target: "txqueue", "status: {:?}", self.status()); Ok(TransactionImportResult::Current) } @@ -923,11 +994,22 @@ mod test { (tx1.sign(secret), tx2.sign(secret)) } + /// Returns two consecutive transactions, both with increased gas price + fn new_tx_pair_with_gas_price_increment(gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) { + let gas = default_gas_price() + gas_price_increment; + let tx1 = new_unsigned_tx(default_nonce(), gas); + let tx2 = new_unsigned_tx(default_nonce() + 1.into(), gas); + + let keypair = Random.generate().unwrap(); + let secret = &keypair.secret(); + (tx1.sign(secret), tx2.sign(secret)) + } + fn new_tx_pair_default(nonce_increment: U256, gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) { new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment) } - /// Returns two transactions with identical (sender, nonce) but different gas_price/hash. + /// Returns two transactions with identical (sender, nonce) but different gas price/hash. 
fn new_similar_tx_pair() -> (SignedTransaction, SignedTransaction) { new_tx_pair_default(0.into(), 1.into()) } @@ -1310,6 +1392,39 @@ mod test { assert_eq!(top.len(), 2); } + #[test] + fn should_penalize_transactions_from_sender() { + // given + let mut txq = TransactionQueue::new(); + // txa, txb - slightly bigger gas price to have consistent ordering + let (txa, txb) = new_tx_pair_default(1.into(), 0.into()); + let (tx1, tx2) = new_tx_pair_with_gas_price_increment(3.into()); + + // insert everything + txq.add(txa.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(txb.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + + let top = txq.top_transactions(); + assert_eq!(top[0], tx1); + assert_eq!(top[1], txa); + assert_eq!(top[2], tx2); + assert_eq!(top[3], txb); + assert_eq!(top.len(), 4); + + // when + txq.penalize(&tx1.hash()); + + // then + let top = txq.top_transactions(); + assert_eq!(top[0], txa); + assert_eq!(top[1], txb); + assert_eq!(top[2], tx1); + assert_eq!(top[3], tx2); + assert_eq!(top.len(), 4); + } + #[test] fn should_return_pending_hashes() { // given @@ -1395,6 +1510,9 @@ mod test { let stats = txq.status(); assert_eq!(stats.pending, 3); assert_eq!(stats.future, 0); + assert_eq!(txq.future.by_priority.len(), 0); + assert_eq!(txq.future.by_address.len(), 0); + assert_eq!(txq.future.by_gas_price.len(), 0); } #[test] diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index a2b483d40..9fa126cc7 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -94,7 +94,6 @@ impl ClientService { pruning: pruning, channel: io_service.channel(), snapshot_root: snapshot_path.into(), - client_db: client_path.into(), db_restore: client.clone(), }; let snapshot = Arc::new(try!(SnapshotService::new(snapshot_params))); 
@@ -187,7 +186,7 @@ impl IoHandler for ClientIoHandler { ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); } ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); } ClientIoMessage::BeginRestoration(ref manifest) => { - if let Err(e) = self.snapshot.init_restore(manifest.clone()) { + if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) { warn!("Failed to initialize snapshot restoration: {}", e); } } diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 16c59db2e..8cfc4c96b 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -92,7 +92,8 @@ impl Account { let mut pairs = Vec::new(); - for (k, v) in db.iter() { + for item in try!(db.iter()) { + let (k, v) = try!(item); pairs.push((k, v)); } diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index 05b3281c8..4f7f912ca 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -21,10 +21,10 @@ use header::Header; use views::BlockView; use rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View}; -use rlp::{Compressible, RlpType}; use util::{Bytes, Hashable, H256}; +use util::triehash::ordered_trie_root; -const HEADER_FIELDS: usize = 10; +const HEADER_FIELDS: usize = 8; const BLOCK_FIELDS: usize = 2; pub struct AbridgedBlock { @@ -61,8 +61,6 @@ impl AbridgedBlock { stream .append(&header.author()) .append(&header.state_root()) - .append(&header.transactions_root()) - .append(&header.receipts_root()) .append(&header.log_bloom()) .append(&header.difficulty()) .append(&header.gas_limit()) @@ -79,33 +77,35 @@ impl AbridgedBlock { } AbridgedBlock { - rlp: UntrustedRlp::new(stream.as_raw()).compress(RlpType::Blocks).to_vec(), + rlp: stream.out(), } } /// Flesh out an abridged block view with the provided parent hash and block number. /// /// Will fail if contains invalid rlp. 
- pub fn to_block(&self, parent_hash: H256, number: u64) -> Result { - let rlp = UntrustedRlp::new(&self.rlp).decompress(RlpType::Blocks); - let rlp = UntrustedRlp::new(&rlp); + pub fn to_block(&self, parent_hash: H256, number: u64, receipts_root: H256) -> Result { + let rlp = UntrustedRlp::new(&self.rlp); let mut header: Header = Default::default(); header.set_parent_hash(parent_hash); header.set_author(try!(rlp.val_at(0))); header.set_state_root(try!(rlp.val_at(1))); - header.set_transactions_root(try!(rlp.val_at(2))); - header.set_receipts_root(try!(rlp.val_at(3))); - header.set_log_bloom(try!(rlp.val_at(4))); - header.set_difficulty(try!(rlp.val_at(5))); + header.set_log_bloom(try!(rlp.val_at(2))); + header.set_difficulty(try!(rlp.val_at(3))); header.set_number(number); - header.set_gas_limit(try!(rlp.val_at(6))); - header.set_gas_used(try!(rlp.val_at(7))); - header.set_timestamp(try!(rlp.val_at(8))); - header.set_extra_data(try!(rlp.val_at(9))); + header.set_gas_limit(try!(rlp.val_at(4))); + header.set_gas_used(try!(rlp.val_at(5))); + header.set_timestamp(try!(rlp.val_at(6))); + header.set_extra_data(try!(rlp.val_at(7))); - let transactions = try!(rlp.val_at(10)); - let uncles: Vec
= try!(rlp.val_at(11)); + let transactions = try!(rlp.val_at(8)); + let uncles: Vec
= try!(rlp.val_at(9)); + + header.set_transactions_root(ordered_trie_root( + try!(rlp.at(8)).iter().map(|r| r.as_raw().to_owned()) + )); + header.set_receipts_root(receipts_root); let mut uncles_rlp = RlpStream::new(); uncles_rlp.append(&uncles); @@ -143,20 +143,22 @@ mod tests { #[test] fn empty_block_abridging() { let b = Block::default(); + let receipts_root = b.header.receipts_root().clone(); let encoded = encode_block(&b); let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded)); - assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b); + assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); } #[test] #[should_panic] fn wrong_number() { let b = Block::default(); + let receipts_root = b.header.receipts_root().clone(); let encoded = encode_block(&b); let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded)); - assert_eq!(abridged.to_block(H256::new(), 2).unwrap(), b); + assert_eq!(abridged.to_block(H256::new(), 2, receipts_root).unwrap(), b); } #[test] @@ -184,9 +186,14 @@ mod tests { b.transactions.push(t1); b.transactions.push(t2); + let receipts_root = b.header.receipts_root().clone(); + b.header.set_transactions_root(::util::triehash::ordered_trie_root( + b.transactions.iter().map(::rlp::encode).map(|out| out.to_vec()) + )); + let encoded = encode_block(&b); let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded[..])); - assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b); + assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 43622fc51..2074f8174 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -15,6 +15,9 @@ // along with Parity. If not, see . //! Snapshot creation, restoration, and network service. +//! +//! Documentation of the format can be found at +//! 
https://github.com/ethcore/parity/wiki/%22PV64%22-Snapshot-Format use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; @@ -34,7 +37,7 @@ use util::journaldb::{self, Algorithm, JournalDB}; use util::kvdb::Database; use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; use util::sha3::SHA3_NULL_RLP; -use rlp::{RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; +use rlp::{RlpStream, Stream, UntrustedRlp, View}; use self::account::Account; use self::block::AbridgedBlock; @@ -358,15 +361,15 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex Result { use basic_types::Seal::With; use util::U256; + use util::triehash::ordered_trie_root; let rlp = UntrustedRlp::new(chunk); let item_count = rlp.item_count(); @@ -585,7 +588,11 @@ impl BlockRebuilder { let abridged_rlp = try!(pair.at(0)).as_raw().to_owned(); let abridged_block = AbridgedBlock::from_raw(abridged_rlp); let receipts: Vec<::receipt::Receipt> = try!(pair.val_at(1)); - let block = try!(abridged_block.to_block(parent_hash, cur_number)); + let receipts_root = ordered_trie_root( + try!(pair.at(1)).iter().map(|r| r.as_raw().to_owned()) + ); + + let block = try!(abridged_block.to_block(parent_hash, cur_number, receipts_root)); let block_bytes = block.rlp_bytes(With); if self.rng.gen::() <= POW_VERIFY_RATE { diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 2a186378f..5243a4792 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -27,7 +27,7 @@ use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, Sna use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; -use client::Client; +use client::{BlockChainClient, Client}; use engines::Engine; use error::Error; use ids::BlockID; @@ -35,7 +35,7 @@ use service::ClientIoMessage; use io::IoChannel; -use util::{Bytes, H256, Mutex, RwLock, UtilError}; +use util::{Bytes, H256, Mutex, RwLock, 
RwLockReadGuard, UtilError}; use util::journaldb::Algorithm; use util::kvdb::{Database, DatabaseConfig}; use util::snappy; @@ -70,7 +70,7 @@ struct Restoration { block_chunks_left: HashSet, state: StateRebuilder, blocks: BlockRebuilder, - writer: LooseWriter, + writer: Option, snappy_buffer: Bytes, final_state_root: H256, guard: Guard, @@ -80,8 +80,8 @@ struct RestorationParams<'a> { manifest: ManifestData, // manifest to base restoration on. pruning: Algorithm, // pruning algorithm for the database. db_path: PathBuf, // database path - db_config: &'a DatabaseConfig, - writer: LooseWriter, // writer for recovered snapshot. + db_config: &'a DatabaseConfig, // configuration for the database. + writer: Option, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. guard: Guard, // guard for the restoration directory. } @@ -120,7 +120,10 @@ impl Restoration { let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); try!(self.state.feed(&self.snappy_buffer[..len])); - try!(self.writer.write_state_chunk(hash, chunk)); + + if let Some(ref mut writer) = self.writer.as_mut() { + try!(writer.write_state_chunk(hash, chunk)); + } } Ok(()) @@ -132,7 +135,9 @@ impl Restoration { let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); try!(self.blocks.feed(&self.snappy_buffer[..len], engine)); - try!(self.writer.write_block_chunk(hash, chunk)); + if let Some(ref mut writer) = self.writer.as_mut() { + try!(writer.write_block_chunk(hash, chunk)); + } } Ok(()) @@ -157,7 +162,9 @@ impl Restoration { // connect out-of-order chunks. self.blocks.glue_chunks(); - try!(self.writer.finish(self.manifest)); + if let Some(writer) = self.writer { + try!(writer.finish(self.manifest)); + } self.guard.disarm(); Ok(()) @@ -187,9 +194,6 @@ pub struct ServiceParams { /// The directory to put snapshots in. /// Usually "/snapshot" pub snapshot_root: PathBuf, - /// The client's database directory. - /// Usually "//db". 
- pub client_db: PathBuf, /// A handle for database restoration. pub db_restore: Arc, } @@ -198,7 +202,6 @@ pub struct ServiceParams { /// This controls taking snapshots and restoring from them. pub struct Service { restoration: Mutex>, - client_db: PathBuf, snapshot_root: PathBuf, db_config: DatabaseConfig, io_channel: Channel, @@ -219,7 +222,6 @@ impl Service { pub fn new(params: ServiceParams) -> Result { let mut service = Service { restoration: Mutex::new(None), - client_db: params.client_db, snapshot_root: params.snapshot_root, db_config: params.db_config, io_channel: params.channel, @@ -301,11 +303,15 @@ impl Service { fn replace_client_db(&self) -> Result<(), Error> { let our_db = self.restoration_db(); - trace!(target: "snapshot", "replacing {:?} with {:?}", self.client_db, our_db); - try!(self.db_restore.restore_db(our_db.to_str().unwrap())); + try!(self.db_restore.restore_db(&*our_db.to_string_lossy())); Ok(()) } + /// Get a reference to the snapshot reader. + pub fn reader(&self) -> RwLockReadGuard> { + self.reader.read() + } + /// Tick the snapshot service. This will log any active snapshot /// being taken. pub fn tick(&self) { @@ -339,7 +345,17 @@ impl Service { let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress); self.taking_snapshot.store(false, Ordering::SeqCst); - try!(res); + if let Err(e) = res { + if client.chain_info().best_block_number >= num + ::client::HISTORY { + // "Cancelled" is mincing words a bit -- what really happened + // is that the state we were snapshotting got pruned out + // before we could finish. + info!("Cancelled prematurely-started periodic snapshot."); + return Ok(()) + } else { + return Err(e); + } + } info!("Finished taking snapshot at #{}", num); @@ -348,6 +364,10 @@ impl Service { // destroy the old snapshot reader. 
*reader = None; + if snapshot_dir.exists() { + try!(fs::remove_dir_all(&snapshot_dir)); + } + try!(fs::rename(temp_dir, &snapshot_dir)); *reader = Some(try!(LooseReader::new(snapshot_dir))); @@ -357,11 +377,15 @@ impl Service { } /// Initialize the restoration synchronously. - pub fn init_restore(&self, manifest: ManifestData) -> Result<(), Error> { + /// The recover flag indicates whether to recover the restored snapshot. + pub fn init_restore(&self, manifest: ManifestData, recover: bool) -> Result<(), Error> { let rest_dir = self.restoration_dir(); let mut res = self.restoration.lock(); + self.state_chunks.store(0, Ordering::SeqCst); + self.block_chunks.store(0, Ordering::SeqCst); + // tear down existing restoration. *res = None; @@ -376,7 +400,10 @@ impl Service { try!(fs::create_dir_all(&rest_dir)); // make new restoration. - let writer = try!(LooseWriter::new(self.temp_recovery_dir())); + let writer = match recover { + true => Some(try!(LooseWriter::new(self.temp_recovery_dir()))), + false => None + }; let params = RestorationParams { manifest: manifest, @@ -391,8 +418,8 @@ impl Service { *res = Some(try!(Restoration::new(params))); *self.status.lock() = RestorationStatus::Ongoing { - state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32, - block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32, + state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32, + block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32, }; Ok(()) } @@ -403,35 +430,30 @@ impl Service { fn finalize_restoration(&self, rest: &mut Option) -> Result<(), Error> { trace!(target: "snapshot", "finalizing restoration"); - self.state_chunks.store(0, Ordering::SeqCst); - self.block_chunks.store(0, Ordering::SeqCst); + let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); // destroy the restoration before replacing databases and snapshot. 
try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(()))); try!(self.replace_client_db()); - let mut reader = self.reader.write(); - *reader = None; // destroy the old reader if it existed. + if recover { + let mut reader = self.reader.write(); + *reader = None; // destroy the old reader if it existed. - let snapshot_dir = self.snapshot_dir(); + let snapshot_dir = self.snapshot_dir(); - trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); - if let Err(e) = fs::remove_dir_all(&snapshot_dir) { - match e.kind() { - ErrorKind::NotFound => {} - _ => return Err(e.into()), + if snapshot_dir.exists() { + trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); + try!(fs::remove_dir_all(&snapshot_dir)); } + + trace!(target: "snapshot", "copying restored snapshot files over"); + try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); + + *reader = Some(try!(LooseReader::new(snapshot_dir))); } - try!(fs::create_dir(&snapshot_dir)); - - trace!(target: "snapshot", "copying restored snapshot files over"); - try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); - let _ = fs::remove_dir_all(self.restoration_dir()); - - *reader = Some(try!(LooseReader::new(snapshot_dir))); - *self.status.lock() = RestorationStatus::Inactive; Ok(()) @@ -512,7 +534,13 @@ impl SnapshotService for Service { } fn status(&self) -> RestorationStatus { - *self.status.lock() + let mut cur_status = self.status.lock(); + if let RestorationStatus::Ongoing { ref mut state_chunks_done, ref mut block_chunks_done } = *cur_status { + *state_chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32; + *block_chunks_done = self.block_chunks.load(Ordering::SeqCst) as u32; + } + + cur_status.clone() } fn begin_restore(&self, manifest: ManifestData) { @@ -523,12 +551,6 @@ impl SnapshotService for Service { fn abort_restore(&self) { *self.restoration.lock() = None; *self.status.lock() = RestorationStatus::Inactive; - if let 
Err(e) = fs::remove_dir_all(&self.restoration_dir()) { - match e.kind() { - ErrorKind::NotFound => {}, - _ => warn!("encountered error {} while deleting snapshot restoration dir.", e), - } - } } fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { @@ -585,7 +607,6 @@ mod tests { pruning: Algorithm::Archive, channel: service.channel(), snapshot_root: dir, - client_db: client_db, db_restore: Arc::new(NoopDBRestore), }; diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 34c57df67..cb928346e 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -45,14 +45,16 @@ impl StateProducer { } } + #[cfg_attr(feature="dev", allow(let_and_return))] /// Tick the state producer. This alters the state, writing new data into /// the database. pub fn tick(&mut self, rng: &mut R, db: &mut HashDB) { // modify existing accounts. let mut accounts_to_modify: Vec<_> = { let trie = TrieDB::new(&*db, &self.state_root).unwrap(); - let temp = trie.iter() // binding required due to complicated lifetime stuff + let temp = trie.iter().unwrap() // binding required due to complicated lifetime stuff .filter(|_| rng.gen::() < ACCOUNT_CHURN) + .map(Result::unwrap) .map(|(k, v)| (H256::from_slice(&k), v.to_owned())) .collect(); diff --git a/ethcore/src/snapshot/tests/mod.rs b/ethcore/src/snapshot/tests/mod.rs index 84096bead..d9c0abc73 100644 --- a/ethcore/src/snapshot/tests/mod.rs +++ b/ethcore/src/snapshot/tests/mod.rs @@ -18,6 +18,7 @@ mod blocks; mod state; +mod service; pub mod helpers; diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs new file mode 100644 index 000000000..e136985c6 --- /dev/null +++ b/ethcore/src/snapshot/tests/service.rs @@ -0,0 +1,143 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tests for the snapshot service. + +use std::sync::Arc; + +use client::{BlockChainClient, Client}; +use ids::BlockID; +use snapshot::service::{Service, ServiceParams}; +use snapshot::{self, ManifestData, SnapshotService}; +use spec::Spec; +use tests::helpers::generate_dummy_client_with_spec_and_data; + +use devtools::RandomTempPath; +use io::IoChannel; +use util::kvdb::DatabaseConfig; + +struct NoopDBRestore; + +impl snapshot::DatabaseRestore for NoopDBRestore { + fn restore_db(&self, _new_db: &str) -> Result<(), ::error::Error> { + Ok(()) + } +} + +#[test] +fn restored_is_equivalent() { + const NUM_BLOCKS: u32 = 400; + const TX_PER: usize = 5; + + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + + let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices); + + let path = RandomTempPath::create_dir(); + let mut path = path.as_path().clone(); + let mut client_db = path.clone(); + + client_db.push("client_db"); + path.push("snapshot"); + + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let spec = Spec::new_null(); + let client2 = Client::new( + Default::default(), + &spec, + &client_db, + Arc::new(::miner::Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config, + ).unwrap(); + + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: 
spec.genesis_block(), + db_config: db_config, + pruning: ::util::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: path, + db_restore: client2.clone(), + }; + + let service = Service::new(service_params).unwrap(); + service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap(); + + let manifest = service.manifest().unwrap(); + + service.init_restore(manifest.clone(), true).unwrap(); + assert!(service.init_restore(manifest.clone(), true).is_ok()); + + for hash in manifest.state_hashes { + let chunk = service.chunk(hash).unwrap(); + service.feed_state_chunk(hash, &chunk); + } + + for hash in manifest.block_hashes { + let chunk = service.chunk(hash).unwrap(); + service.feed_block_chunk(hash, &chunk); + } + + assert_eq!(service.status(), ::snapshot::RestorationStatus::Inactive); + + for x in 0..NUM_BLOCKS { + let block1 = client.block(BlockID::Number(x as u64)).unwrap(); + let block2 = client2.block(BlockID::Number(x as u64)).unwrap(); + + assert_eq!(block1, block2); + } +} + +#[test] +fn guards_delete_folders() { + let spec = Spec::new_null(); + let path = RandomTempPath::create_dir(); + let mut path = path.as_path().clone(); + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS), + pruning: ::util::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: path.clone(), + db_restore: Arc::new(NoopDBRestore), + }; + + let service = Service::new(service_params).unwrap(); + path.push("restoration"); + + let manifest = ManifestData { + state_hashes: vec![], + block_hashes: vec![], + block_number: 0, + block_hash: Default::default(), + state_root: Default::default(), + }; + + service.init_restore(manifest.clone(), true).unwrap(); + assert!(path.exists()); + + service.abort_restore(); + assert!(!path.exists()); + + service.init_restore(manifest.clone(), true).unwrap(); + assert!(path.exists()); + + 
drop(service); + assert!(!path.exists()); +} \ No newline at end of file diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 1eae0f3b3..f02136e0c 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -184,7 +184,7 @@ impl Spec { let r = Rlp::new(&seal); (0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect() }); - return header; + header } /// Compose the genesis block for this chain. @@ -261,6 +261,11 @@ impl Spec { pub fn new_null() -> Self { Spec::load(include_bytes!("../../res/null.json") as &[u8]).expect("null.json is invalid") } + + /// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring work). + pub fn new_test_instant() -> Self { + Spec::load(include_bytes!("../../res/instant_seal.json") as &[u8]).expect("instant_seal.json is invalid") + } } #[cfg(test)] @@ -274,7 +279,7 @@ mod tests { // https://github.com/ethcore/parity/issues/1840 #[test] fn test_load_empty() { - assert!(Spec::load(&vec![] as &[u8]).is_err()); + assert!(Spec::load(&[] as &[u8]).is_err()); } #[test] diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 7a3b7b7ee..e1299d1dc 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -191,10 +191,16 @@ impl State { })) } - /// Mutate storage of account `a` so that it is `value` for `key`. + /// Get the code of account `a`. pub fn code(&self, a: &Address) -> Option { self.ensure_cached(a, true, - |a| a.as_ref().map_or(None, |a|a.code().map(|x|x.to_vec()))) + |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec()))) + } + + /// Get the code size of account `a`. + pub fn code_size(&self, a: &Address) -> Option { + self.ensure_cached(a, true, + |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.len()))) } /// Add `incr` to the balance of account `a`. 
@@ -420,10 +426,27 @@ impl fmt::Debug for State { impl Clone for State { fn clone(&self) -> State { + let cache = { + let mut cache = HashMap::new(); + for (key, val) in self.cache.borrow().iter() { + let key = key.clone(); + match *val { + Some(ref acc) if acc.is_dirty() => { + cache.insert(key, Some(acc.clone())); + }, + None => { + cache.insert(key, None); + }, + _ => {}, + } + } + cache + }; + State { db: self.db.boxed_clone(), root: self.root.clone(), - cache: RefCell::new(self.cache.borrow().clone()), + cache: RefCell::new(cache), snapshots: RefCell::new(self.snapshots.borrow().clone()), account_start_nonce: self.account_start_nonce.clone(), factories: self.factories.clone(), @@ -1314,13 +1337,13 @@ fn storage_at_from_database() { let temp = RandomTempPath::new(); let (root, db) = { let mut state = get_temp_state_in(temp.as_path()); - state.set_storage(&a, H256::from(&U256::from(01u64)), H256::from(&U256::from(69u64))); + state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))); state.commit().unwrap(); state.drop() }; let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(s.storage_at(&a, &H256::from(&U256::from(01u64))), H256::from(&U256::from(69u64))); + assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))), H256::from(&U256::from(69u64))); } #[test] diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index ff4e09dc9..dc95e8267 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -19,6 +19,7 @@ use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, Blo use ethereum; use block::IsBlock; use tests::helpers::*; +use types::filter::Filter; use common::*; use devtools::*; use miner::Miner; @@ -131,6 +132,36 @@ fn returns_chain_info() { assert_eq!(info.best_block_hash, block.header().hash()); } +#[test] +fn returns_logs() { + let dummy_block = get_good_dummy_block(); + let client_result = 
get_test_client_with_blocks(vec![dummy_block.clone()]); + let client = client_result.reference(); + let logs = client.logs(Filter { + from_block: BlockID::Earliest, + to_block: BlockID::Latest, + address: None, + topics: vec![], + limit: None, + }); + assert_eq!(logs.len(), 0); +} + +#[test] +fn returns_logs_with_limit() { + let dummy_block = get_good_dummy_block(); + let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); + let client = client_result.reference(); + let logs = client.logs(Filter { + from_block: BlockID::Earliest, + to_block: BlockID::Latest, + address: None, + topics: vec![], + limit: Some(2), + }); + assert_eq!(logs.len(), 0); +} + #[test] fn returns_block_body() { let dummy_block = get_good_dummy_block(); diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index 202e42988..d5d88c087 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -56,7 +56,7 @@ fn can_handshake() { let stop_guard = StopGuard::new(); let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; run_test_worker(scope, stop_guard.share(), socket_path); - let remote_client = nanoipc::init_client::>(socket_path).unwrap(); + let remote_client = nanoipc::generic_client::>(socket_path).unwrap(); assert!(remote_client.handshake().is_ok()); }) @@ -68,7 +68,7 @@ fn can_query_block() { let stop_guard = StopGuard::new(); let socket_path = "ipc:///tmp/parity-client-rpc-20.ipc"; run_test_worker(scope, stop_guard.share(), socket_path); - let remote_client = nanoipc::init_client::>(socket_path).unwrap(); + let remote_client = nanoipc::generic_client::>(socket_path).unwrap(); let non_existant_block = remote_client.block_header(BlockID::Number(999)); diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs index ff96cea74..9dab7524d 100644 --- a/ethcore/src/trace/config.rs +++ b/ethcore/src/trace/config.rs @@ -15,57 +15,14 @@ // along with Parity. If not, see . //! Traces config. 
-use std::str::FromStr; use bloomchain::Config as BloomConfig; -use trace::Error; - -/// 3-value enum. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum Switch { - /// True. - On, - /// False. - Off, - /// Auto. - Auto, -} - -impl Default for Switch { - fn default() -> Self { - Switch::Auto - } -} - -impl FromStr for Switch { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "on" => Ok(Switch::On), - "off" => Ok(Switch::Off), - "auto" => Ok(Switch::Auto), - other => Err(format!("Invalid switch value: {}", other)) - } - } -} - -impl Switch { - /// Tries to turn old switch to new value. - pub fn turn_to(&self, to: Switch) -> Result { - match (*self, to) { - (Switch::On, Switch::On) | (Switch::On, Switch::Auto) | (Switch::Auto, Switch::On) => Ok(true), - (Switch::Off, Switch::On) => Err(Error::ResyncRequired), - _ => Ok(false), - } - } -} /// Traces config. #[derive(Debug, PartialEq, Clone)] pub struct Config { /// Indicates if tracing should be enabled or not. /// If it's None, it will be automatically configured. - pub enabled: Switch, + pub enabled: bool, /// Traces blooms configuration. pub blooms: BloomConfig, /// Preferef cache-size. 
@@ -77,7 +34,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: Switch::default(), + enabled: false, blooms: BloomConfig { levels: 3, elements_per_index: 16, @@ -87,20 +44,3 @@ impl Default for Config { } } } - -#[cfg(test)] -mod tests { - use super::Switch; - - #[test] - fn test_switch_parsing() { - assert_eq!(Switch::On, "on".parse().unwrap()); - assert_eq!(Switch::Off, "off".parse().unwrap()); - assert_eq!(Switch::Auto, "auto".parse().unwrap()); - } - - #[test] - fn test_switch_default() { - assert_eq!(Switch::default(), Switch::Auto); - } -} diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index e7bd7c825..b608ad685 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -22,7 +22,7 @@ use bloomchain::{Number, Config as BloomConfig}; use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf}; use header::BlockNumber; -use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error}; +use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras}; use db::{self, Key, Writable, Readable, CacheUpdatePolicy}; use blooms; use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; @@ -126,38 +126,20 @@ impl BloomGroupDatabase for TraceDB where T: DatabaseExtras { impl TraceDB where T: DatabaseExtras { /// Creates new instance of `TraceDB`. 
- pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Result { - // check if in previously tracing was enabled - let old_tracing = match tracesdb.get(db::COL_TRACE, b"enabled").unwrap() { - Some(ref value) if value as &[u8] == &[0x1] => Switch::On, - Some(ref value) if value as &[u8] == &[0x0] => Switch::Off, - Some(_) => { panic!("tracesdb is corrupted") }, - None => Switch::Auto, - }; - - let enabled = try!(old_tracing.turn_to(config.enabled)); - - let encoded_tracing = match enabled { - true => [0x1], - false => [0x0] - }; - + pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Self { let mut batch = DBTransaction::new(&tracesdb); - batch.put(db::COL_TRACE, b"enabled", &encoded_tracing); batch.put(db::COL_TRACE, b"version", TRACE_DB_VER); tracesdb.write(batch).unwrap(); - let db = TraceDB { + TraceDB { traces: RwLock::new(HashMap::new()), blooms: RwLock::new(HashMap::new()), cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)), tracesdb: tracesdb, bloom_config: config.blooms, - enabled: enabled, + enabled: config.enabled, extras: extras, - }; - - Ok(db) + } } fn cache_size(&self) -> usize { @@ -419,7 +401,7 @@ mod tests { use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction}; use devtools::RandomTempPath; use header::BlockNumber; - use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; + use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError}; use trace::trace::{Call, Action, Res}; use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; @@ -474,22 +456,10 @@ mod tests { let mut config = Config::default(); // set autotracing - config.enabled = Switch::Auto; + config.enabled = false; { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), false); - } - - { - 
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), false); - } - - config.enabled = Switch::Off; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); assert_eq!(tracedb.tracing_enabled(), false); } } @@ -501,50 +471,12 @@ mod tests { let mut config = Config::default(); // set tracing on - config.enabled = Switch::On; + config.enabled = true; { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); assert_eq!(tracedb.tracing_enabled(), true); } - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), true); - } - - config.enabled = Switch::Auto; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), true); - } - - config.enabled = Switch::Off; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), false); - } - } - - #[test] - #[should_panic] - fn test_invalid_reopening_db() { - let temp = RandomTempPath::new(); - let db = new_db(temp.as_str()); - let mut config = Config::default(); - - // set tracing on - config.enabled = Switch::Off; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), true); - } - - config.enabled = Switch::On; - TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic! 
} fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { @@ -595,7 +527,7 @@ mod tests { let temp = RandomTempPath::new(); let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap()); let mut config = Config::default(); - config.enabled = Switch::On; + config.enabled = true; let block_0 = H256::from(0xa1); let block_1 = H256::from(0xa2); let tx_0 = H256::from(0xff); @@ -607,7 +539,7 @@ mod tests { extras.transaction_hashes.insert(0, vec![tx_0.clone()]); extras.transaction_hashes.insert(1, vec![tx_1.clone()]); - let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap(); + let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)); // import block 0 let request = create_simple_import_request(0, block_0.clone()); @@ -679,10 +611,10 @@ mod tests { extras.transaction_hashes.insert(0, vec![tx_0.clone()]); // set tracing on - config.enabled = Switch::On; + config.enabled = true; { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())); // import block 0 let request = create_simple_import_request(0, block_0.clone()); @@ -692,7 +624,7 @@ mod tests { } { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)); let traces = tracedb.transaction_traces(0, 0); assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]); } diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 06604450f..da3bbc02b 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -26,7 +26,7 @@ mod noop_tracer; pub use types::trace_types::{filter, flat, localized, trace}; pub use types::trace_types::error::Error as TraceError; -pub use self::config::{Config, Switch}; +pub use self::config::Config; pub use self::db::TraceDB; 
pub use self::error::Error; pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff}; diff --git a/ethcore/src/types/filter.rs b/ethcore/src/types/filter.rs index 91338899f..6274d63f4 100644 --- a/ethcore/src/types/filter.rs +++ b/ethcore/src/types/filter.rs @@ -41,6 +41,12 @@ pub struct Filter { /// If None, match all. /// If specified, log must contain one of these topics. pub topics: Vec>>, + + /// Logs limit + /// + /// If None, return all logs + /// If specified, should only return *last* `n` logs. + pub limit: Option, } impl Clone for Filter { @@ -59,7 +65,8 @@ impl Clone for Filter { from_block: self.from_block.clone(), to_block: self.to_block.clone(), address: self.address.clone(), - topics: topics[..].to_vec() + topics: topics[..].to_vec(), + limit: self.limit, } } } @@ -117,6 +124,7 @@ mod tests { to_block: BlockID::Latest, address: None, topics: vec![None, None, None, None], + limit: None, }; let possibilities = none_filter.bloom_possibilities(); @@ -136,7 +144,8 @@ mod tests { None, None, None, - ] + ], + limit: None, }; let possibilities = filter.bloom_possibilities(); @@ -154,7 +163,8 @@ mod tests { Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), None, None, - ] + ], + limit: None, }; let possibilities = filter.bloom_possibilities(); @@ -181,7 +191,8 @@ mod tests { ]), Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), None - ] + ], + limit: None, }; // number of possibilites should be equal 2 * 2 * 2 * 1 = 8 @@ -201,7 +212,8 @@ mod tests { Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into()]), None, None, - ] + ], + limit: None, }; let entry0 = LogEntry { diff --git a/ethcore/src/types/transaction.rs b/ethcore/src/types/transaction.rs index f7e582f11..386b85f7e 100644 --- a/ethcore/src/types/transaction.rs +++ b/ethcore/src/types/transaction.rs @@ -21,7 +21,7 @@ use std::cell::*; use rlp::*; use 
util::sha3::Hashable; use util::{H256, Address, U256, Bytes}; -use ethkey::{Signature, sign, Secret, recover, public_to_address, Error as EthkeyError}; +use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError}; use error::*; use evm::Schedule; use header::BlockNumber; @@ -305,13 +305,18 @@ impl SignedTransaction { match sender { Some(s) => Ok(s), None => { - let s = public_to_address(&try!(recover(&self.signature(), &self.unsigned.hash()))); + let s = public_to_address(&try!(self.public_key())); self.sender.set(Some(s)); Ok(s) } } } + /// Returns the public key of the sender. + pub fn public_key(&self) -> Result { + Ok(try!(recover(&self.signature(), &self.unsigned.hash()))) + } + /// Do basic validation, checking for valid signature and minimum gas, // TODO: consider use in block validation. #[cfg(test)] diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index db3a9314c..4e1305a33 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -215,7 +215,7 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> { fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: &H256) -> Result<(), Error> { let block = UntrustedRlp::new(block); let tx = try!(block.at(1)); - let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here + let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here if expected_root != transactions_root { return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() }))) } @@ -241,6 +241,7 @@ mod tests { use spec::*; use transaction::*; use tests::helpers::*; + use types::log_entry::{LogEntry, LocalizedLogEntry}; use rlp::View; fn check_ok(result: Result<(), Error>) { @@ -333,6 +334,12 @@ mod tests 
{ fn block_receipts(&self, _hash: &H256) -> Option { unimplemented!() } + + + fn logs(&self, _blocks: Vec, _matches: F, _limit: Option) -> Vec + where F: Fn(&LogEntry) -> bool, Self: Sized { + unimplemented!() + } } fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> { @@ -415,7 +422,7 @@ mod tests { let mut uncles_rlp = RlpStream::new(); uncles_rlp.append(&good_uncles); let good_uncles_hash = uncles_rlp.as_raw().sha3(); - let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::(t).to_vec()).collect()); + let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::(t).to_vec())); let mut parent = good.clone(); parent.set_number(9); diff --git a/ethcrypto/Cargo.toml b/ethcrypto/Cargo.toml index 85298266d..5c638b555 100644 --- a/ethcrypto/Cargo.toml +++ b/ethcrypto/Cargo.toml @@ -8,5 +8,5 @@ rust-crypto = "0.2.36" tiny-keccak = "1.0" eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } ethkey = { path = "../ethkey" } -bigint = { path = "../util/bigint" } +ethcore-bigint = { path = "../util/bigint" } diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index 9263e32c9..7a1aba48c 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -16,12 +16,13 @@ //! Crypto utils used ethstore and network. 
-extern crate bigint; +extern crate ethcore_bigint as bigint; extern crate tiny_keccak; extern crate crypto as rcrypto; extern crate secp256k1; extern crate ethkey; +use std::fmt; use tiny_keccak::Keccak; use rcrypto::pbkdf2::pbkdf2; use rcrypto::scrypt::{scrypt, ScryptParams}; @@ -39,6 +40,17 @@ pub enum Error { InvalidMessage, } +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let s = match *self { + Error::Secp(ref err) => err.to_string(), + Error::InvalidMessage => "Invalid message".into(), + }; + + write!(f, "{}", s) + } +} + impl From for Error { fn from(e: SecpError) -> Self { Error::Secp(e) diff --git a/ethkey/Cargo.toml b/ethkey/Cargo.toml index 6253d5a39..319a38b20 100644 --- a/ethkey/Cargo.toml +++ b/ethkey/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "ethkey" version = "0.2.0" -authors = ["debris "] +authors = ["Ethcore "] [dependencies] rand = "0.3.14" @@ -10,7 +10,7 @@ tiny-keccak = "1.0" eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } rustc-serialize = "0.3" docopt = { version = "0.6", optional = true } -bigint = { path = "../util/bigint" } +ethcore-bigint = { path = "../util/bigint" } [features] default = [] diff --git a/ethkey/src/lib.rs b/ethkey/src/lib.rs index 0bce090a5..1f345b2ff 100644 --- a/ethkey/src/lib.rs +++ b/ethkey/src/lib.rs @@ -20,7 +20,7 @@ extern crate lazy_static; extern crate tiny_keccak; extern crate secp256k1; extern crate rustc_serialize; -extern crate bigint; +extern crate ethcore_bigint as bigint; mod brain; mod error; diff --git a/ethstore/src/account/safe_account.rs b/ethstore/src/account/safe_account.rs index 56e8494f7..38069a718 100644 --- a/ethstore/src/account/safe_account.rs +++ b/ethstore/src/account/safe_account.rs @@ -16,7 +16,7 @@ use ethkey::{KeyPair, sign, Address, Secret, Signature, Message}; use {json, Error, crypto}; -use crypto::Keccak256; +use crypto::{Keccak256}; use random::Random; use account::{Version, Cipher, Kdf, Aes128Ctr, 
Pbkdf2, Prf}; @@ -170,6 +170,11 @@ impl SafeAccount { sign(&secret, message).map_err(From::from) } + pub fn decrypt(&self, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let secret = try!(self.crypto.secret(password)); + crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) + } + pub fn change_password(&self, old_password: &str, new_password: &str, iterations: u32) -> Result { let secret = try!(self.crypto.secret(old_password)); let result = SafeAccount { diff --git a/ethstore/src/error.rs b/ethstore/src/error.rs index a5d3de745..cee689b24 100644 --- a/ethstore/src/error.rs +++ b/ethstore/src/error.rs @@ -17,6 +17,7 @@ use std::fmt; use std::io::Error as IoError; use ethkey::Error as EthKeyError; +use crypto::Error as EthCryptoError; #[derive(Debug)] pub enum Error { @@ -28,6 +29,7 @@ pub enum Error { InvalidKeyFile(String), CreationFailed, EthKey(EthKeyError), + EthCrypto(EthCryptoError), Custom(String), } @@ -42,6 +44,7 @@ impl fmt::Display for Error { Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason), Error::CreationFailed => "Account creation failed".into(), Error::EthKey(ref err) => err.to_string(), + Error::EthCrypto(ref err) => err.to_string(), Error::Custom(ref s) => s.clone(), }; @@ -60,3 +63,9 @@ impl From for Error { Error::EthKey(err) } } + +impl From for Error { + fn from(err: EthCryptoError) -> Self { + Error::EthCrypto(err) + } +} diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index 29f4c757c..8de988b9a 100644 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -144,6 +144,11 @@ impl SecretStore for EthStore { account.sign(password, message) } + fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let account = try!(self.get(account)); + account.decrypt(password, shared_mac, message) + } + fn uuid(&self, address: &Address) -> Result { let account = try!(self.get(address)); 
Ok(account.id.into()) diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 90ed79fb5..aa79cb8b6 100644 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -20,33 +20,24 @@ use json::UUID; pub trait SecretStore: Send + Sync { fn insert_account(&self, secret: Secret, password: &str) -> Result; - fn import_presale(&self, json: &[u8], password: &str) -> Result; - fn import_wallet(&self, json: &[u8], password: &str) -> Result; - - fn accounts(&self) -> Result, Error>; - fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>; - fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>; fn sign(&self, account: &Address, password: &str, message: &Message) -> Result; + fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error>; + fn accounts(&self) -> Result, Error>; fn uuid(&self, account: &Address) -> Result; - fn name(&self, account: &Address) -> Result; - fn meta(&self, account: &Address) -> Result; fn set_name(&self, address: &Address, name: String) -> Result<(), Error>; - fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error>; fn local_path(&self) -> String; - fn list_geth_accounts(&self, testnet: bool) -> Vec
; - fn import_geth_accounts(&self, desired: Vec
, testnet: bool) -> Result, Error>; } diff --git a/ipc/hypervisor/src/lib.rs b/ipc/hypervisor/src/lib.rs index 3cfd464e9..78b8b04ce 100644 --- a/ipc/hypervisor/src/lib.rs +++ b/ipc/hypervisor/src/lib.rs @@ -240,7 +240,7 @@ mod tests { ::std::thread::spawn(move || { while !hypervisor_ready.load(Ordering::Relaxed) { } - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::fast_client::>(url).unwrap(); client.handshake().unwrap(); client.module_ready(test_module_id); }); diff --git a/ipc/hypervisor/src/service.rs.in b/ipc/hypervisor/src/service.rs.in index 938cea345..74d289f50 100644 --- a/ipc/hypervisor/src/service.rs.in +++ b/ipc/hypervisor/src/service.rs.in @@ -110,7 +110,7 @@ impl HypervisorService { let modules = self.modules.read().unwrap(); modules.get(&module_id).map(|module| { trace!(target: "hypervisor", "Sending shutdown to {}({})", module_id, &module.control_url); - let client = nanoipc::init_client::>(&module.control_url).unwrap(); + let client = nanoipc::fast_client::>(&module.control_url).unwrap(); client.shutdown(); trace!(target: "hypervisor", "Sent shutdown to {}", module_id); }); diff --git a/ipc/nano/Cargo.toml b/ipc/nano/Cargo.toml index ee399e60f..b358eb23a 100644 --- a/ipc/nano/Cargo.toml +++ b/ipc/nano/Cargo.toml @@ -10,4 +10,4 @@ license = "GPL-3.0" ethcore-ipc = { path = "../rpc" } nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" } log = "0.3" - +lazy_static = "0.2" diff --git a/ipc/nano/src/lib.rs b/ipc/nano/src/lib.rs index da48151a6..1157e75d3 100644 --- a/ipc/nano/src/lib.rs +++ b/ipc/nano/src/lib.rs @@ -19,6 +19,7 @@ extern crate ethcore_ipc as ipc; extern crate nanomsg; #[macro_use] extern crate log; +#[macro_use] extern crate lazy_static; pub use ipc::{WithSocket, IpcInterface, IpcConfig}; pub use nanomsg::Socket as NanoSocket; @@ -28,7 +29,8 @@ use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut} use std::ops::Deref; const POLL_TIMEOUT: isize = 200; -const 
CLIENT_CONNECTION_TIMEOUT: isize = 120000; +const DEFAULT_CONNECTION_TIMEOUT: isize = 30000; +const DEBUG_CONNECTION_TIMEOUT: isize = 5000; /// Generic worker to handle service (binded) sockets pub struct Worker where S: IpcInterface { @@ -68,7 +70,7 @@ pub fn init_duplex_client(socket_addr: &str) -> Result, Sock SocketError::DuplexLink })); - socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap(); + socket.set_receive_timeout(DEFAULT_CONNECTION_TIMEOUT).unwrap(); let endpoint = try!(socket.connect(socket_addr).map_err(|e| { warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e); @@ -84,26 +86,58 @@ pub fn init_duplex_client(socket_addr: &str) -> Result, Sock /// Spawns client <`S`> over specified address /// creates socket and connects endpoint to it /// for request-reply connections to the service -pub fn init_client(socket_addr: &str) -> Result, SocketError> where S: WithSocket { +pub fn client(socket_addr: &str, receive_timeout: Option) -> Result, SocketError> where S: WithSocket { let mut socket = try!(Socket::new(Protocol::Req).map_err(|e| { warn!(target: "ipc", "Failed to create ipc socket: {:?}", e); SocketError::RequestLink })); - socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap(); + if let Some(timeout) = receive_timeout { + socket.set_receive_timeout(timeout).unwrap(); + } let endpoint = try!(socket.connect(socket_addr).map_err(|e| { warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e); SocketError::RequestLink })); - trace!(target: "ipc", "Created cleint for {}", socket_addr); + trace!(target: "ipc", "Created client for {}", socket_addr); Ok(GuardedSocket { client: Arc::new(S::init(socket)), _endpoint: endpoint, }) } +lazy_static! 
{ + /// Set PARITY_IPC_DEBUG=1 for fail-fast connectivity problems diagnostic + pub static ref DEBUG_FLAG: bool = { + use std::env; + + if let Ok(debug) = env::var("PARITY_IPC_DEBUG") { + debug == "1" || debug.to_uppercase() == "TRUE" + } + else { false } + }; +} + +/// Client with no default timeout on operations +pub fn generic_client(socket_addr: &str) -> Result, SocketError> where S: WithSocket { + if *DEBUG_FLAG { + client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT)) + } else { + client(socket_addr, None) + } +} + +/// Client over interface that is supposed to give quick almost non-blocking responses +pub fn fast_client(socket_addr: &str) -> Result, SocketError> where S: WithSocket { + if *DEBUG_FLAG { + client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT)) + } else { + client(socket_addr, Some(DEFAULT_CONNECTION_TIMEOUT)) + } +} + /// Error occurred while establising socket or endpoint #[derive(Debug)] pub enum SocketError { diff --git a/json/Cargo.toml b/json/Cargo.toml index 3c66e1317..90c36cedc 100644 --- a/json/Cargo.toml +++ b/json/Cargo.toml @@ -10,7 +10,7 @@ rustc-serialize = "0.3" serde = "0.8" serde_json = "0.8" serde_macros = { version = "0.8", optional = true } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} [build-dependencies] serde_codegen = { version = "0.8", optional = true } diff --git a/parity/blockchain.rs b/parity/blockchain.rs index ccdf61130..3dfdac804 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -17,23 +17,25 @@ use std::str::{FromStr, from_utf8}; use std::{io, fs}; use std::io::{BufReader, BufRead}; -use std::time::Duration; +use std::time::{Instant, Duration}; use std::thread::sleep; use std::sync::Arc; use rustc_serialize::hex::FromHex; use ethcore_logger::{setup_log, Config as LogConfig}; use io::{PanicHandler, ForwardPanic}; -use util::ToPretty; +use util::{ToPretty, Uint}; use rlp::PayloadInfo; use ethcore::service::ClientService; -use ethcore::client::{Mode, 
DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID}; +use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockID}; use ethcore::error::ImportError; use ethcore::miner::Miner; use cache::CacheConfig; -use informant::Informant; -use params::{SpecType, Pruning}; +use params::{SpecType, Pruning, Switch, tracing_switch_to_bool}; +use informant::{Informant, MillisecondDuration}; +use io_handler::ImportIoHandler; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; +use user_defaults::UserDefaults; use fdlimit; #[derive(Debug, PartialEq)] @@ -107,32 +109,49 @@ pub fn execute(cmd: BlockchainCmd) -> Result { } fn execute_import(cmd: ImportBlockchain) -> Result { + let timer = Instant::now(); + // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); + // Setup logging + let _logger = setup_log(&cmd.logger_config); + + // create dirs used by parity + try!(cmd.dirs.create_dirs()); + // load spec file let spec = try!(cmd.spec.spec()); // load genesis hash let genesis_hash = spec.genesis_header().hash(); - // Setup logging - let _logger = setup_log(&cmd.logger_config); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); fdlimit::raise_fd_limit(); // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); // prepare client and snapshot paths. 
- let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); - let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref()); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm); // build client let service = try!(ClientService::start( @@ -170,6 +189,10 @@ fn execute_import(cmd: ImportBlockchain) -> Result { let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color); + try!(service.register_io_handler(Arc::new(ImportIoHandler { + info: Arc::new(informant), + })).map_err(|_| "Unable to register informant handler".to_owned())); + let do_import = |bytes| { while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } match client.import_block(bytes) { @@ -181,7 +204,6 @@ fn execute_import(cmd: ImportBlockchain) -> Result { }, Ok(_) => {}, } - informant.tick(); Ok(()) }; @@ -215,14 +237,36 @@ fn execute_import(cmd: ImportBlockchain) -> Result { } client.flush_queue(); - Ok("Import completed.".into()) + // save user defaults + user_defaults.pruning = algorithm; + user_defaults.tracing = tracing; + try!(user_defaults.save(&user_defaults_path)); + + let report = client.report(); + + let ms = timer.elapsed().as_milliseconds(); + Ok(format!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s", + ms / 1000, + 
report.blocks_imported, + (report.blocks_imported * 1000) as u64 / ms, + report.transactions_applied, + (report.transactions_applied * 1000) as u64 / ms, + report.gas_processed / From::from(1_000_000), + (report.gas_processed / From::from(ms * 1000)).low_u64(), + ).into()) } fn execute_export(cmd: ExportBlockchain) -> Result { // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); - let format = cmd.format.unwrap_or_else(Default::default); + // Setup logging + let _logger = setup_log(&cmd.logger_config); + + // create dirs used by parity + try!(cmd.dirs.create_dirs()); + + let format = cmd.format.unwrap_or_default(); // load spec file let spec = try!(cmd.spec.spec()); @@ -230,23 +274,32 @@ fn execute_export(cmd: ExportBlockchain) -> Result { // load genesis hash let genesis_hash = spec.genesis_header().hash(); - // Setup logging - let _logger = setup_log(&cmd.logger_config); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); fdlimit::raise_fd_limit(); // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); // prepare client and snapshot paths. 
- let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); - let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref()); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, @@ -266,10 +319,10 @@ fn execute_export(cmd: ExportBlockchain) -> Result { }; let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found")); - let to = try!(client.block_number(cmd.to_block).ok_or("From block could not be found")); + let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found")); for i in from..(to + 1) { - let b = client.block(BlockID::Number(i)).unwrap(); + let b = try!(client.block(BlockID::Number(i)).ok_or("Error exporting incomplete chain")); match format { DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); } diff --git a/parity/boot.rs b/parity/boot.rs index aa0e4b82b..d930085db 100644 --- a/parity/boot.rs +++ b/parity/boot.rs @@ -54,16 +54,14 @@ pub fn payload() -> Result { let mut buffer = Vec::new(); try!( - io::stdin().read_to_end(&mut buffer) - .map_err(|io_err| BootError::ReadArgs(io_err)) + io::stdin().read_to_end(&mut 
buffer).map_err(BootError::ReadArgs) ); - ipc::binary::deserialize::(&buffer) - .map_err(|binary_error| BootError::DecodeArgs(binary_error)) + ipc::binary::deserialize::(&buffer).map_err(BootError::DecodeArgs) } pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket>{ - let hypervisor_client = nanoipc::init_client::>(hv_url).unwrap(); + let hypervisor_client = nanoipc::fast_client::>(hv_url).unwrap(); hypervisor_client.handshake().unwrap(); hypervisor_client.module_ready(module_id, control_url.to_owned()); @@ -73,7 +71,7 @@ pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> Guar pub fn dependency>(url: &str) -> Result, BootError> { - nanoipc::init_client::(url).map_err(|socket_err| BootError::DependencyConnect(socket_err)) + nanoipc::generic_client::(url).map_err(BootError::DependencyConnect) } pub fn main_thread() -> Arc { diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml new file mode 100644 index 000000000..ec5dfbe35 --- /dev/null +++ b/parity/cli/config.full.toml @@ -0,0 +1,98 @@ +[parity] +mode = "active" +mode_timeout = 300 +mode_alarm = 3600 +chain = "homestead" +db_path = "$HOME/.parity" +keys_path = "$HOME/.parity/keys" +identity = "" + +[account] +unlock = ["0xdeadbeefcafe0000000000000000000000000000"] +password = ["~/.safe/password.file"] +keys_iterations = 10240 + +[signer] +force = false +disable = false +port = 8180 +interface = "127.0.0.1" +path = "$HOME/.parity/signer" + +[network] +disable = false +port = 30303 +min_peers = 25 +max_peers = 50 +nat = "any" +id = "0x1" +bootnodes = [] +discovery = true + +reserved_only = false +reserved_peers = "./path_to_file" + +[rpc] +disable = false +port = 8545 +interface = "local" +cors = "null" +apis = ["web3", "eth", "net", "personal", "ethcore", "traces", "rpc"] +hosts = ["none"] + +[ipc] +disable = false +path = "$HOME/.parity/jsonrpc.ipc" +apis = ["web3", "eth", "net", "personal", "ethcore", "traces", "rpc"] + +[dapps] 
+disable = false +port = 8080 +interface = "local" +hosts = ["none"] +path = "$HOME/.parity/dapps" +# authorization: +user = "test_user" +pass = "test_pass" + +[mining] +author = "0xdeadbeefcafe0000000000000000000000000001" +force_sealing = true +reseal_on_txs = "all" +reseal_min_period = 4000 +work_queue_size = 20 +relay_set = "cheap" +usd_per_tx = "0" +usd_per_eth = "auto" +price_update_period = "hourly" +gas_floor_target = "4700000" +gas_cap = "6283184" +tx_queue_size = 1024 +tx_gas_limit = "6283184" +extra_data = "Parity" +remove_solved = false +notify_work = ["http://localhost:3001"] + +[footprint] +tracing = "auto" +pruning = "auto" +cache_size_db = 64 +cache_size_blocks = 8 +cache_size_queue = 50 +cache_size = 128 # Overrides above caches with total size +fast_and_loose = false +db_compaction = "ssd" +fat_db = false + +[snapshots] +disable_periodic = false + +[vm] +jit = false + +[misc] +logging = "own_tx=trace" +log_file = "/var/log/parity.log" +color = true + + diff --git a/parity/cli/config.invalid1.toml b/parity/cli/config.invalid1.toml new file mode 100644 index 000000000..b340da102 --- /dev/null +++ b/parity/cli/config.invalid1.toml @@ -0,0 +1,2 @@ +[account +unlock = "0x1" diff --git a/parity/cli/config.invalid2.toml b/parity/cli/config.invalid2.toml new file mode 100644 index 000000000..766dcfb59 --- /dev/null +++ b/parity/cli/config.invalid2.toml @@ -0,0 +1,4 @@ +[account] +unlock = "0x1" +passwd = [] + diff --git a/parity/cli/config.toml b/parity/cli/config.toml new file mode 100644 index 000000000..11ec333aa --- /dev/null +++ b/parity/cli/config.toml @@ -0,0 +1,63 @@ +[parity] +mode = "dark" +mode_timeout = 15 +mode_alarm = 10 +chain = "./chain.json" + +[account] +unlock = ["0x1", "0x2", "0x3"] +password = ["passwdfile path"] + +[signer] +disable = true + +[network] +disable = false +discovery = true +nat = "any" +min_peers = 10 +max_peers = 20 + +reserved_only = true +reserved_peers = "./path/to/reserved_peers" + + +[rpc] +disable = true +port = 
8180 + +[ipc] +apis = ["rpc", "eth"] + +[dapps] +port = 8080 +user = "username" +pass = "password" + +[mining] +author = "0xdeadbeefcafe0000000000000000000000000001" +force_sealing = true +reseal_on_txs = "all" +reseal_min_period = 4000 +price_update_period = "hourly" +tx_queue_size = 2048 + +[footprint] +tracing = "on" +pruning = "fast" +cache_size_db = 128 +cache_size_blocks = 16 +cache_size_queue = 100 +db_compaction = "ssd" +fat_db = true + +[snapshots] +disable_periodic = true + +[vm] +jit = false + +[misc] +logging = "own_tx=trace" +log_file = "/var/log/parity.log" +color = true diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs new file mode 100644 index 000000000..b8b10ec1d --- /dev/null +++ b/parity/cli/mod.rs @@ -0,0 +1,705 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#[macro_use] +mod usage; + +usage! 
{ + { + // Commands + cmd_daemon: bool, + cmd_wallet: bool, + cmd_account: bool, + cmd_new: bool, + cmd_list: bool, + cmd_export: bool, + cmd_import: bool, + cmd_signer: bool, + cmd_new_token: bool, + cmd_snapshot: bool, + cmd_restore: bool, + cmd_ui: bool, + cmd_tools: bool, + cmd_hash: bool, + + // Arguments + arg_pid_file: String, + arg_file: Option, + arg_path: Vec, + + // Flags + // -- Legacy Options + flag_geth: bool, + flag_testnet: bool, + flag_import_geth_keys: bool, + flag_datadir: Option, + flag_networkid: Option, + flag_peers: Option, + flag_nodekey: Option, + flag_nodiscover: bool, + flag_jsonrpc: bool, + flag_jsonrpc_off: bool, + flag_webapp: bool, + flag_dapps_off: bool, + flag_rpc: bool, + flag_rpcaddr: Option, + flag_rpcport: Option, + flag_rpcapi: Option, + flag_rpccorsdomain: Option, + flag_ipcdisable: bool, + flag_ipc_off: bool, + flag_ipcapi: Option, + flag_ipcpath: Option, + flag_gasprice: Option, + flag_etherbase: Option, + flag_extradata: Option, + flag_cache: Option, + + // -- Miscellaneous Options + flag_version: bool, + flag_no_config: bool, + } + { + // -- Operating Options + flag_mode: String = "active", or |c: &Config| otry!(c.parity).mode.clone(), + flag_mode_timeout: u64 = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(), + flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(), + flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(), + flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(), + flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), + flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), + + // -- Account Options + flag_unlock: Option = None, + or |c: &Config| otry!(c.account).unlock.clone().map(|vec| Some(vec.join(","))), + flag_password: Vec = Vec::new(), + or |c: &Config| otry!(c.account).password.clone(), + flag_keys_iterations: u32 = 10240u32, + or 
|c: &Config| otry!(c.account).keys_iterations.clone(), + + flag_force_signer: bool = false, + or |c: &Config| otry!(c.signer).force.clone(), + flag_no_signer: bool = false, + or |c: &Config| otry!(c.signer).disable.clone(), + flag_signer_port: u16 = 8180u16, + or |c: &Config| otry!(c.signer).port.clone(), + flag_signer_interface: String = "local", + or |c: &Config| otry!(c.signer).interface.clone(), + flag_signer_path: String = "$HOME/.parity/signer", + or |c: &Config| otry!(c.signer).path.clone(), + // NOTE [todr] For security reasons don't put this to config files + flag_signer_no_validation: bool = false, or |_| None, + + // -- Networking Options + flag_no_network: bool = false, + or |c: &Config| otry!(c.network).disable.clone(), + flag_port: u16 = 30303u16, + or |c: &Config| otry!(c.network).port.clone(), + flag_min_peers: u16 = 25u16, + or |c: &Config| otry!(c.network).min_peers.clone(), + flag_max_peers: u16 = 50u16, + or |c: &Config| otry!(c.network).max_peers.clone(), + flag_nat: String = "any", + or |c: &Config| otry!(c.network).nat.clone(), + flag_network_id: Option = None, + or |c: &Config| otry!(c.network).id.clone().map(Some), + flag_bootnodes: Option = None, + or |c: &Config| otry!(c.network).bootnodes.clone().map(|vec| Some(vec.join(","))), + flag_no_discovery: bool = false, + or |c: &Config| otry!(c.network).discovery.map(|d| !d).clone(), + flag_node_key: Option = None, + or |c: &Config| otry!(c.network).node_key.clone().map(Some), + flag_reserved_peers: Option = None, + or |c: &Config| otry!(c.network).reserved_peers.clone().map(Some), + flag_reserved_only: bool = false, + or |c: &Config| otry!(c.network).reserved_only.clone(), + + // -- API and Console Options + // RPC + flag_no_jsonrpc: bool = false, + or |c: &Config| otry!(c.rpc).disable.clone(), + flag_jsonrpc_port: u16 = 8545u16, + or |c: &Config| otry!(c.rpc).port.clone(), + flag_jsonrpc_interface: String = "local", + or |c: &Config| otry!(c.rpc).interface.clone(), + flag_jsonrpc_cors: Option 
= None, + or |c: &Config| otry!(c.rpc).cors.clone().map(Some), + flag_jsonrpc_apis: String = "web3,eth,net,ethcore,personal,traces,rpc", + or |c: &Config| otry!(c.rpc).apis.clone().map(|vec| vec.join(",")), + flag_jsonrpc_hosts: String = "none", + or |c: &Config| otry!(c.rpc).hosts.clone().map(|vec| vec.join(",")), + + // IPC + flag_no_ipc: bool = false, + or |c: &Config| otry!(c.ipc).disable.clone(), + flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc", + or |c: &Config| otry!(c.ipc).path.clone(), + flag_ipc_apis: String = "web3,eth,net,ethcore,personal,traces,rpc", + or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")), + + // DAPPS + flag_no_dapps: bool = false, + or |c: &Config| otry!(c.dapps).disable.clone(), + flag_dapps_port: u16 = 8080u16, + or |c: &Config| otry!(c.dapps).port.clone(), + flag_dapps_interface: String = "local", + or |c: &Config| otry!(c.dapps).interface.clone(), + flag_dapps_hosts: String = "none", + or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")), + flag_dapps_path: String = "$HOME/.parity/dapps", + or |c: &Config| otry!(c.dapps).path.clone(), + flag_dapps_user: Option = None, + or |c: &Config| otry!(c.dapps).user.clone().map(Some), + flag_dapps_pass: Option = None, + or |c: &Config| otry!(c.dapps).pass.clone().map(Some), + + // -- Sealing/Mining Options + flag_author: Option = None, + or |c: &Config| otry!(c.mining).author.clone().map(Some), + flag_force_sealing: bool = false, + or |c: &Config| otry!(c.mining).force_sealing.clone(), + flag_reseal_on_txs: String = "own", + or |c: &Config| otry!(c.mining).reseal_on_txs.clone(), + flag_reseal_min_period: u64 = 2000u64, + or |c: &Config| otry!(c.mining).reseal_min_period.clone(), + flag_work_queue_size: usize = 20usize, + or |c: &Config| otry!(c.mining).work_queue_size.clone(), + flag_tx_gas_limit: Option = None, + or |c: &Config| otry!(c.mining).tx_gas_limit.clone().map(Some), + flag_relay_set: String = "cheap", + or |c: &Config| 
otry!(c.mining).relay_set.clone(), + flag_usd_per_tx: String = "0", + or |c: &Config| otry!(c.mining).usd_per_tx.clone(), + flag_usd_per_eth: String = "auto", + or |c: &Config| otry!(c.mining).usd_per_eth.clone(), + flag_price_update_period: String = "hourly", + or |c: &Config| otry!(c.mining).price_update_period.clone(), + flag_gas_floor_target: String = "4700000", + or |c: &Config| otry!(c.mining).gas_floor_target.clone(), + flag_gas_cap: String = "6283184", + or |c: &Config| otry!(c.mining).gas_cap.clone(), + flag_extra_data: Option = None, + or |c: &Config| otry!(c.mining).extra_data.clone().map(Some), + flag_tx_queue_size: usize = 1024usize, + or |c: &Config| otry!(c.mining).tx_queue_size.clone(), + flag_remove_solved: bool = false, + or |c: &Config| otry!(c.mining).remove_solved.clone(), + flag_notify_work: Option = None, + or |c: &Config| otry!(c.mining).notify_work.clone().map(|vec| Some(vec.join(","))), + + // -- Footprint Options + flag_tracing: String = "auto", + or |c: &Config| otry!(c.footprint).tracing.clone(), + flag_pruning: String = "auto", + or |c: &Config| otry!(c.footprint).pruning.clone(), + flag_cache_size_db: u32 = 64u32, + or |c: &Config| otry!(c.footprint).cache_size_db.clone(), + flag_cache_size_blocks: u32 = 8u32, + or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(), + flag_cache_size_queue: u32 = 50u32, + or |c: &Config| otry!(c.footprint).cache_size_queue.clone(), + flag_cache_size: Option = None, + or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some), + flag_fast_and_loose: bool = false, + or |c: &Config| otry!(c.footprint).fast_and_loose.clone(), + flag_db_compaction: String = "ssd", + or |c: &Config| otry!(c.footprint).db_compaction.clone(), + flag_fat_db: bool = false, + or |c: &Config| otry!(c.footprint).fat_db.clone(), + + // -- Import/Export Options + flag_from: String = "1", or |_| None, + flag_to: String = "latest", or |_| None, + flag_format: Option = None, or |_| None, + + // -- Snapshot Optons + flag_at: 
String = "latest", or |_| None, + flag_no_periodic_snapshot: bool = false, + or |c: &Config| otry!(c.snapshots).disable_periodic.clone(), + + // -- Virtual Machine Options + flag_jitvm: bool = false, + or |c: &Config| otry!(c.vm).jit.clone(), + + // -- Miscellaneous Options + flag_config: String = "$HOME/.parity/config.toml", or |_| None, + flag_logging: Option = None, + or |c: &Config| otry!(c.misc).logging.clone().map(Some), + flag_log_file: Option = None, + or |c: &Config| otry!(c.misc).log_file.clone().map(Some), + flag_no_color: bool = false, + or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), + } +} + + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Config { + parity: Option, + account: Option, + signer: Option, + network: Option, + rpc: Option, + ipc: Option, + dapps: Option, + mining: Option, + footprint: Option, + snapshots: Option, + vm: Option, + misc: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Operating { + mode: Option, + mode_timeout: Option, + mode_alarm: Option, + chain: Option, + db_path: Option, + keys_path: Option, + identity: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Account { + unlock: Option>, + password: Option>, + keys_iterations: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Signer { + force: Option, + disable: Option, + port: Option, + interface: Option, + path: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Network { + disable: Option, + port: Option, + min_peers: Option, + max_peers: Option, + nat: Option, + id: Option, + bootnodes: Option>, + discovery: Option, + node_key: Option, + reserved_peers: Option, + reserved_only: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Rpc { + disable: Option, + port: Option, + interface: Option, + cors: Option, + apis: Option>, + hosts: Option>, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Ipc { + disable: 
Option, + path: Option, + apis: Option>, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Dapps { + disable: Option, + port: Option, + interface: Option, + hosts: Option>, + path: Option, + user: Option, + pass: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Mining { + author: Option, + force_sealing: Option, + reseal_on_txs: Option, + reseal_min_period: Option, + work_queue_size: Option, + tx_gas_limit: Option, + relay_set: Option, + usd_per_tx: Option, + usd_per_eth: Option, + price_update_period: Option, + gas_floor_target: Option, + gas_cap: Option, + extra_data: Option, + tx_queue_size: Option, + remove_solved: Option, + notify_work: Option>, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Footprint { + tracing: Option, + pruning: Option, + fast_and_loose: Option, + cache_size: Option, + cache_size_db: Option, + cache_size_blocks: Option, + cache_size_queue: Option, + db_compaction: Option, + fat_db: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Snapshots { + disable_periodic: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct VM { + jit: Option, +} + +#[derive(Default, Debug, PartialEq, RustcDecodable)] +struct Misc { + logging: Option, + log_file: Option, + color: Option, +} + +#[cfg(test)] +mod tests { + use super::{ + Args, ArgsError, + Config, Operating, Account, Signer, Network, Rpc, Ipc, Dapps, Mining, Footprint, Snapshots, VM, Misc + }; + use toml; + + #[test] + fn should_parse_args_and_include_config() { + // given + let mut config = Config::default(); + let mut operating = Operating::default(); + operating.chain = Some("morden".into()); + config.parity = Some(operating); + + // when + let args = Args::parse_with_config(&["parity"], config).unwrap(); + + // then + assert_eq!(args.flag_chain, "morden".to_owned()); + } + + #[test] + fn should_not_use_config_if_cli_is_provided() { + // given + let mut config = Config::default(); + let mut 
operating = Operating::default(); + operating.chain = Some("morden".into()); + config.parity = Some(operating); + + // when + let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); + + // then + assert_eq!(args.flag_chain, "xyz".to_owned()); + } + + #[test] + fn should_parse_full_config() { + // given + let config = toml::decode_str(include_str!("./config.full.toml")).unwrap(); + + // when + let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap(); + + // then + assert_eq!(args, Args { + // Commands + cmd_daemon: false, + cmd_wallet: false, + cmd_account: false, + cmd_new: false, + cmd_list: false, + cmd_export: false, + cmd_import: false, + cmd_signer: false, + cmd_new_token: false, + cmd_snapshot: false, + cmd_restore: false, + cmd_ui: false, + cmd_tools: false, + cmd_hash: false, + + // Arguments + arg_pid_file: "".into(), + arg_file: None, + arg_path: vec![], + + // -- Operating Options + flag_mode: "active".into(), + flag_mode_timeout: 300u64, + flag_mode_alarm: 3600u64, + flag_chain: "xyz".into(), + flag_db_path: "$HOME/.parity".into(), + flag_keys_path: "$HOME/.parity/keys".into(), + flag_identity: "".into(), + + // -- Account Options + flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), + flag_password: vec!["~/.safe/password.file".into()], + flag_keys_iterations: 10240u32, + + flag_force_signer: false, + flag_no_signer: false, + flag_signer_port: 8180u16, + flag_signer_interface: "127.0.0.1".into(), + flag_signer_path: "$HOME/.parity/signer".into(), + flag_signer_no_validation: false, + + // -- Networking Options + flag_no_network: false, + flag_port: 30303u16, + flag_min_peers: 25u16, + flag_max_peers: 50u16, + flag_nat: "any".into(), + flag_network_id: Some("0x1".into()), + flag_bootnodes: Some("".into()), + flag_no_discovery: false, + flag_node_key: None, + flag_reserved_peers: Some("./path_to_file".into()), + flag_reserved_only: false, + + // -- API and Console Options + // RPC 
+ flag_no_jsonrpc: false, + flag_jsonrpc_port: 8545u16, + flag_jsonrpc_interface: "local".into(), + flag_jsonrpc_cors: Some("null".into()), + flag_jsonrpc_apis: "web3,eth,net,personal,ethcore,traces,rpc".into(), + flag_jsonrpc_hosts: "none".into(), + + // IPC + flag_no_ipc: false, + flag_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(), + flag_ipc_apis: "web3,eth,net,personal,ethcore,traces,rpc".into(), + + // DAPPS + flag_no_dapps: false, + flag_dapps_port: 8080u16, + flag_dapps_interface: "local".into(), + flag_dapps_hosts: "none".into(), + flag_dapps_path: "$HOME/.parity/dapps".into(), + flag_dapps_user: Some("test_user".into()), + flag_dapps_pass: Some("test_pass".into()), + + // -- Sealing/Mining Options + flag_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + flag_force_sealing: true, + flag_reseal_on_txs: "all".into(), + flag_reseal_min_period: 4000u64, + flag_work_queue_size: 20usize, + flag_tx_gas_limit: Some("6283184".into()), + flag_relay_set: "cheap".into(), + flag_usd_per_tx: "0".into(), + flag_usd_per_eth: "auto".into(), + flag_price_update_period: "hourly".into(), + flag_gas_floor_target: "4700000".into(), + flag_gas_cap: "6283184".into(), + flag_extra_data: Some("Parity".into()), + flag_tx_queue_size: 1024usize, + flag_remove_solved: false, + flag_notify_work: Some("http://localhost:3001".into()), + + // -- Footprint Options + flag_tracing: "auto".into(), + flag_pruning: "auto".into(), + flag_cache_size_db: 64u32, + flag_cache_size_blocks: 8u32, + flag_cache_size_queue: 50u32, + flag_cache_size: Some(128), + flag_fast_and_loose: false, + flag_db_compaction: "ssd".into(), + flag_fat_db: false, + + // -- Import/Export Options + flag_from: "1".into(), + flag_to: "latest".into(), + flag_format: None, + + // -- Snapshot Optons + flag_at: "latest".into(), + flag_no_periodic_snapshot: false, + + // -- Virtual Machine Options + flag_jitvm: false, + + // -- Legacy Options + flag_geth: false, + flag_testnet: false, + flag_import_geth_keys: 
false, + flag_datadir: None, + flag_networkid: None, + flag_peers: None, + flag_nodekey: None, + flag_nodiscover: false, + flag_jsonrpc: false, + flag_jsonrpc_off: false, + flag_webapp: false, + flag_dapps_off: false, + flag_rpc: false, + flag_rpcaddr: None, + flag_rpcport: None, + flag_rpcapi: None, + flag_rpccorsdomain: None, + flag_ipcdisable: false, + flag_ipc_off: false, + flag_ipcapi: None, + flag_ipcpath: None, + flag_gasprice: None, + flag_etherbase: None, + flag_extradata: None, + flag_cache: None, + + // -- Miscellaneous Options + flag_version: false, + flag_config: "$HOME/.parity/config.toml".into(), + flag_logging: Some("own_tx=trace".into()), + flag_log_file: Some("/var/log/parity.log".into()), + flag_no_color: false, + flag_no_config: false, + }); + } + + #[test] + fn should_parse_config_and_return_errors() { + let config1 = Args::parse_config(include_str!("./config.invalid1.toml")); + let config2 = Args::parse_config(include_str!("./config.invalid2.toml")); + + match (config1, config2) { + (Err(ArgsError::Parsing(_)), Err(ArgsError::Decode(_))) => {}, + (a, b) => { + assert!(false, "Got invalid error types: {:?}, {:?}", a, b); + } + } + } + + #[test] + fn should_deserialize_toml_file() { + let config: Config = toml::decode_str(include_str!("./config.toml")).unwrap(); + + assert_eq!(config, Config { + parity: Some(Operating { + mode: Some("dark".into()), + mode_timeout: Some(15u64), + mode_alarm: Some(10u64), + chain: Some("./chain.json".into()), + db_path: None, + keys_path: None, + identity: None, + }), + account: Some(Account { + unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), + password: Some(vec!["passwdfile path".into()]), + keys_iterations: None, + }), + signer: Some(Signer { + force: None, + disable: Some(true), + port: None, + interface: None, + path: None, + }), + network: Some(Network { + disable: Some(false), + port: None, + min_peers: Some(10), + max_peers: Some(20), + nat: Some("any".into()), + id: None, + bootnodes: None, 
+ discovery: Some(true), + node_key: None, + reserved_peers: Some("./path/to/reserved_peers".into()), + reserved_only: Some(true), + }), + rpc: Some(Rpc { + disable: Some(true), + port: Some(8180), + interface: None, + cors: None, + apis: None, + hosts: None, + }), + ipc: Some(Ipc { + disable: None, + path: None, + apis: Some(vec!["rpc".into(), "eth".into()]), + }), + dapps: Some(Dapps { + disable: None, + port: Some(8080), + path: None, + interface: None, + hosts: None, + user: Some("username".into()), + pass: Some("password".into()) + }), + mining: Some(Mining { + author: Some("0xdeadbeefcafe0000000000000000000000000001".into()), + force_sealing: Some(true), + reseal_on_txs: Some("all".into()), + reseal_min_period: Some(4000), + work_queue_size: None, + relay_set: None, + usd_per_tx: None, + usd_per_eth: None, + price_update_period: Some("hourly".into()), + gas_floor_target: None, + gas_cap: None, + tx_queue_size: Some(2048), + tx_gas_limit: None, + extra_data: None, + remove_solved: None, + notify_work: None, + }), + footprint: Some(Footprint { + tracing: Some("on".into()), + pruning: Some("fast".into()), + fast_and_loose: None, + cache_size: None, + cache_size_db: Some(128), + cache_size_blocks: Some(16), + cache_size_queue: Some(100), + db_compaction: Some("ssd".into()), + fat_db: Some(true), + }), + snapshots: Some(Snapshots { + disable_periodic: Some(true), + }), + vm: Some(VM { + jit: Some(false), + }), + misc: Some(Misc { + logging: Some("own_tx=trace".into()), + log_file: Some("/var/log/parity.log".into()), + color: Some(true), + }) + }); + } +} diff --git a/parity/cli/usage.rs b/parity/cli/usage.rs new file mode 100644 index 000000000..da406d36a --- /dev/null +++ b/parity/cli/usage.rs @@ -0,0 +1,213 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +macro_rules! otry { + ($e: expr) => ( + match $e { + Some(ref v) => v, + None => { + return None; + } + } + ) +} +macro_rules! usage { + ( + { + $( + $field_a:ident : $typ_a:ty, + )* + } + { + $( + $field:ident : $typ:ty = $default:expr, or $from_config:expr, + )* + } + ) => { + use toml; + use std::{fs, io, process}; + use std::io::Read; + use util::version; + use docopt::{Docopt, Error as DocoptError}; + use helpers::replace_home; + use rustc_serialize; + + #[derive(Debug)] + pub enum ArgsError { + Docopt(DocoptError), + Parsing(Vec), + Decode(toml::DecodeError), + Config(String, io::Error), + } + + impl ArgsError { + pub fn exit(self) -> ! 
{ + match self { + ArgsError::Docopt(e) => e.exit(), + ArgsError::Parsing(errors) => { + println!("There is an error in config file."); + for e in &errors { + println!("{}", e); + } + process::exit(2) + }, + ArgsError::Decode(e) => { + println!("You might have supplied invalid parameters in config file."); + println!("{}", e); + process::exit(2) + }, + ArgsError::Config(path, e) => { + println!("There was an error reading your config file at: {}", path); + println!("{}", e); + process::exit(2) + } + } + } + } + + impl From for ArgsError { + fn from(e: DocoptError) -> Self { ArgsError::Docopt(e) } + } + + impl From for ArgsError { + fn from(e: toml::DecodeError) -> Self { ArgsError::Decode(e) } + } + + #[derive(Debug, PartialEq)] + pub struct Args { + $( + pub $field_a: $typ_a, + )* + + $( + pub $field: $typ, + )* + } + + impl Default for Args { + fn default() -> Self { + Args { + $( + $field_a: Default::default(), + )* + + $( + $field: $default.into(), + )* + } + } + } + + #[derive(Default, Debug, PartialEq, Clone, RustcDecodable)] + struct RawArgs { + $( + $field_a: $typ_a, + )* + $( + $field: Option<$typ>, + )* + } + + impl Args { + + pub fn parse>(command: &[S]) -> Result { + let raw_args = try!(RawArgs::parse(command)); + + // Skip loading config file if no_config flag is specified + if raw_args.flag_no_config { + return Ok(raw_args.into_args(Config::default())); + } + + let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config); + let config_file = replace_home(&config_file); + let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) { + // Load config file + (Ok(mut file), _) => { + println!("Loading config file from {}", &config_file); + let mut config = String::new(); + try!(file.read_to_string(&mut config).map_err(|e| ArgsError::Config(config_file, e))); + try!(Self::parse_config(&config)) + }, + // Don't display error in case default config cannot be loaded. 
+ (Err(_), false) => Config::default(), + // Config set from CLI (fail with error) + (Err(e), true) => { + return Err(ArgsError::Config(config_file, e)); + }, + }; + + Ok(raw_args.into_args(config)) + } + + #[cfg(test)] + pub fn parse_without_config>(command: &[S]) -> Result { + Self::parse_with_config(command, Config::default()) + } + + #[cfg(test)] + fn parse_with_config>(command: &[S], config: Config) -> Result { + Ok(try!(RawArgs::parse(command)).into_args(config)) + } + + fn parse_config(config: &str) -> Result { + let mut value_parser = toml::Parser::new(&config); + match value_parser.parse() { + Some(value) => { + let result = rustc_serialize::Decodable::decode(&mut toml::Decoder::new(toml::Value::Table(value))); + match result { + Ok(config) => Ok(config), + Err(e) => Err(e.into()), + } + }, + None => Err(ArgsError::Parsing(value_parser.errors)), + } + } + + pub fn print_version() -> String { + format!(include_str!("./version.txt"), version()) + } + } + + impl RawArgs { + fn into_args(self, config: Config) -> Args { + let mut args = Args::default(); + $( + args.$field_a = self.$field_a; + )* + $( + args.$field = self.$field.or_else(|| $from_config(&config)).unwrap_or_else(|| $default.into()); + )* + args + } + + pub fn parse>(command: &[S]) -> Result { + Docopt::new(Self::usage()).and_then(|d| d.argv(command).decode()) + } + + fn usage() -> String { + format!( + include_str!("./usage.txt"), + $( + $field={ let v: $typ = $default.into(); v }, + // Uncomment this to debug + // "named argument never used" error + // $field = $default, + )* + ) + } + } + }; +} diff --git a/parity/cli.rs b/parity/cli/usage.txt similarity index 59% rename from parity/cli.rs rename to parity/cli/usage.txt index bb46bda13..a94f55a8d 100644 --- a/parity/cli.rs +++ b/parity/cli/usage.txt @@ -1,23 +1,3 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use util::version; -use docopt::Docopt; - -pub const USAGE: &'static str = r#" Parity. Ethereum Client. By Wood/Paronyan/Kotewicz/Drwięga/Volf et al. Copyright 2015, 2016 Ethcore (UK) Limited @@ -33,7 +13,8 @@ Usage: parity export [ ] [options] parity signer new-token [options] parity snapshot [options] - parity restore [options] + parity restore [ ] [options] + parity tools hash Operating Options: --mode MODE Set the operating mode. MODE can be one of: @@ -41,134 +22,140 @@ Operating Options: passive - Parity syncs initially, then sleeps and wakes regularly to resync. dark - Parity syncs only when an external interface - is active. [default: active]. + is active. (default: {flag_mode}). --mode-timeout SECS Specify the number of seconds before inactivity timeout occurs when mode is dark or passive - [default: 300]. + (default: {flag_mode_timeout}). --mode-alarm SECS Specify the number of seconds before auto sleep reawake timeout occurs when mode is passive - [default: 3600]. + (default: {flag_mode_alarm}). --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, classic or testnet - [default: homestead]. + (default: {flag_chain}). -d --db-path PATH Specify the database & configuration directory path - [default: $HOME/.parity]. + (default: {flag_db_path}). 
--keys-path PATH Specify the path for JSON key files to be found - [default: $HOME/.parity/keys]. - --identity NAME Specify your node's name. + (default: {flag_keys_path}). + --identity NAME Specify your node's name. (default: {flag_identity}) Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. ACCOUNTS is a comma-delimited list of addresses. - Implies --no-signer. + Implies --no-signer. (default: {flag_unlock:?}) --password FILE Provide a file containing a password for unlocking - an account. + an account. (default: {flag_password:?}) --keys-iterations NUM Specify the number of iterations to use when deriving key from the password (bigger is more - secure) [default: 10240]. + secure) (default: {flag_keys_iterations}). --force-signer Enable Trusted Signer WebSocket endpoint used by Signer UIs, even when --unlock is in use. + (default: ${flag_force_signer}) --no-signer Disable Trusted Signer WebSocket endpoint used by - Signer UIs. + Signer UIs. (default: ${flag_no_signer}) --signer-port PORT Specify the port of Trusted Signer server - [default: 8180]. + (default: {flag_signer_port}). --signer-interface IP Specify the hostname portion of the Trusted Signer server, IP should be an interface's IP address, - or local [default: local]. + or local (default: {flag_signer_interface}). --signer-path PATH Specify directory where Signer UIs tokens should - be stored. [default: $HOME/.parity/signer] + be stored. (default: {flag_signer_path}) --signer-no-validation Disable Origin and Host headers validation for Trusted Signer. WARNING: INSECURE. Used only for - development. + development. (default: {flag_signer_no_validation}) Networking Options: - --no-network Disable p2p networking. + --no-network Disable p2p networking. (default: {flag_no_network}) --port PORT Override the port on which the node should listen - [default: 30303]. - --min-peers NUM Try to maintain at least NUM peers [default: 25]. 
- --max-peers NUM Allow up to that many peers [default: 50]. + (default: {flag_port}). + --min-peers NUM Try to maintain at least NUM peers (default: {flag_min_peers}). + --max-peers NUM Allow up to that many peers (default: {flag_max_peers}). --nat METHOD Specify method to use for determining public address. Must be one of: any, none, upnp, - extip: [default: any]. + extip: (default: {flag_nat}). --network-id INDEX Override the network identifier from the chain we - are on. + are on. (default: {flag_network_id:?}) --bootnodes NODES Override the bootnodes from our chain. NODES should - be comma-delimited enodes. - --no-discovery Disable new peer discovery. + be comma-delimited enodes. (default: {flag_bootnodes:?}) + --no-discovery Disable new peer discovery. (default: {flag_no_discovery}) --node-key KEY Specify node secret key, either as 64-character hex - string or input to SHA3 operation. + string or input to SHA3 operation. (default: {flag_node_key:?}) --reserved-peers FILE Provide a file containing enodes, one per line. These nodes will always have a reserved slot on top - of the normal maximum peers. - --reserved-only Connect only to reserved nodes. + of the normal maximum peers. (default: {flag_reserved_peers:?}) + --reserved-only Connect only to reserved nodes. (default: {flag_reserved_only}) API and Console Options: - --no-jsonrpc Disable the JSON-RPC API server. + --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc}) --jsonrpc-port PORT Specify the port portion of the JSONRPC API server - [default: 8545]. + (default: {flag_jsonrpc_port}). --jsonrpc-interface IP Specify the hostname portion of the JSONRPC API server, IP should be an interface's IP address, or - all (all interfaces) or local [default: local]. + all (all interfaces) or local (default: {flag_jsonrpc_interface}). --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses. 
+ (default: {flag_jsonrpc_cors:?}) --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited list of API name. Possible name are web3, eth, net, personal, ethcore, ethcore_set, traces, rpc. - [default: web3,eth,net,ethcore,personal,traces,rpc]. + (default: {flag_jsonrpc_apis}). --jsonrpc-hosts HOSTS List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: "all", "none", - [default: none]. + (default: {flag_jsonrpc_hosts}). - --no-ipc Disable JSON-RPC over IPC service. + --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc}) --ipc-path PATH Specify custom path for JSON-RPC over IPC service - [default: $HOME/.parity/jsonrpc.ipc]. + (default: {flag_ipc_path}). --ipc-apis APIS Specify custom API set available via JSON-RPC over - IPC [default: web3,eth,net,ethcore,personal,traces,rpc]. + IPC (default: {flag_ipc_apis}). - --no-dapps Disable the Dapps server (e.g. status page). + --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps}) --dapps-port PORT Specify the port portion of the Dapps server - [default: 8080]. + (default: {flag_dapps_port}). --dapps-interface IP Specify the hostname portion of the Dapps server, IP should be an interface's IP address, - or local [default: local]. + or local (default: {flag_dapps_interface}). --dapps-hosts HOSTS List of allowed Host header values. This option will validate the Host header sent by the browser, it is additional security against some attack vectors. Special options: "all", "none", - [default: none]. + (default: {flag_dapps_hosts}). --dapps-user USERNAME Specify username for Dapps server. It will be used in HTTP Basic Authentication Scheme. If --dapps-pass is not specified you will be - asked for password on startup. + asked for password on startup. 
(default: {flag_dapps_user:?}) --dapps-pass PASSWORD Specify password for Dapps server. Use only in - conjunction with --dapps-user. + conjunction with --dapps-user. (default: {flag_dapps_pass:?}) --dapps-path PATH Specify directory where dapps should be installed. - [default: $HOME/.parity/dapps] + (default: {flag_dapps_path}) Sealing/Mining Options: --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks. NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION. + (default: {flag_author:?}) --force-sealing Force the node to author new blocks as if it were always sealing/mining. + (default: {flag_force_sealing}) --reseal-on-txs SET Specify which transactions should force the node to reseal a block. SET is one of: none - never reseal on new transactions; own - reseal only on a new local transaction; ext - reseal only on a new external transaction; - all - reseal on all new transactions [default: own]. + all - reseal on all new transactions + (default: {flag_reseal_on_txs}). --reseal-min-period MS Specify the minimum time between reseals from incoming transactions. MS is time measured in - milliseconds [default: 2000]. + milliseconds (default: {flag_reseal_min_period}). --work-queue-size ITEMS Specify the number of historical work packages which are kept cached lest a solution is found for them later. High values take more memory but result - in fewer unusable solutions [default: 20]. + in fewer unusable solutions (default: {flag_work_queue_size}). --tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas a single transaction may have for it to be mined. + (default: {flag_tx_gas_limit:?}) --relay-set SET Set of transactions to relay. 
SET may be: cheap - Relay any transaction in the queue (this may include invalid transactions); @@ -176,78 +163,81 @@ Sealing/Mining Options: guarantees we don't relay invalid transactions, but means we relay nothing if not mining); lenient - Same as strict when mining, and cheap - when not [default: cheap]. + when not (default: {flag_relay_set}). --usd-per-tx USD Amount of USD to be paid for a basic transaction - [default: 0]. The minimum gas price is set + (default: {flag_usd_per_tx}). The minimum gas price is set accordingly. --usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an amount in USD, a web service or 'auto' to use each web service in turn and fallback on the last known - good value [default: auto]. + good value (default: {flag_usd_per_eth}). --price-update-period T T will be allowed to pass between each gas price update. T may be daily, hourly, a number of seconds, or a time string of the form "2 days", "30 minutes" - etc. [default: hourly]. + etc. (default: {flag_price_update_period}). --gas-floor-target GAS Amount of gas per block to target when sealing a new - block [default: 4700000]. + block (default: {flag_gas_floor_target}). --gas-cap GAS A cap on how large we will raise the gas limit per - block due to transaction volume [default: 6283184]. + block due to transaction volume (default: {flag_gas_cap}). --extra-data STRING Specify a custom extra-data for authored blocks, no - more than 32 characters. + more than 32 characters. (default: {flag_extra_data:?}) --tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting - to be included in next block) [default: 1024]. + to be included in next block) (default: {flag_tx_queue_size}). --remove-solved Move solved blocks from the work package queue instead of cloning them. This gives a slightly faster import speed, but means that extra solutions submitted for the same work package will go unused. 
+ (default: {flag_remove_solved}) --notify-work URLS URLs to which work package notifications are pushed. URLS should be a comma-delimited list of HTTP URLs. + (default: {flag_notify_work:?}) Footprint Options: --tracing BOOL Indicates if full transaction tracing should be enabled. Works only if client had been fully synced with tracing enabled. BOOL may be one of auto, on, off. auto uses last used value of this option (off - if it does not exist) [default: auto]. + if it does not exist) (default: {flag_tracing}). --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of auto, archive, fast: archive - keep all state trie data. No pruning. fast - maintain journal overlay. Fast but 50MB used. auto - use the method most recently synced or - default to fast if none synced [default: auto]. - --cache-size-db MB Override database cache size [default: 64]. + default to fast if none synced (default: {flag_pruning}). + --cache-size-db MB Override database cache size (default: {flag_cache_size_db}). --cache-size-blocks MB Specify the prefered size of the blockchain cache in - megabytes [default: 8]. + megabytes (default: {flag_cache_size_blocks}). --cache-size-queue MB Specify the maximum size of memory to use for block - queue [default: 50]. + queue (default: {flag_cache_size_queue}). --cache-size MB Set total amount of discretionary memory to use for the entire system, overrides other cache and queue - options. + options. (default: {flag_cache_size:?}) --fast-and-loose Disables DB WAL, which gives a significant speed up - but means an unclean exit is unrecoverable. + but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose}) --db-compaction TYPE Database compaction type. TYPE may be one of: ssd - suitable for SSDs and fast HDDs; - hdd - suitable for slow HDDs [default: ssd]. + hdd - suitable for slow HDDs (default: {flag_db_compaction}). - --fat-db Fat database. + --fat-db Fat database. 
(default: {flag_fat_db}) Import/Export Options: --from BLOCK Export from block BLOCK, which may be an index or - hash [default: 1]. + hash (default: {flag_from}). --to BLOCK Export to (including) block BLOCK, which may be an - index, hash or 'latest' [default: latest]. + index, hash or 'latest' (default: {flag_to}). --format FORMAT For import/export in given format. FORMAT must be one of 'hex' and 'binary'. + (default: {flag_format:?} = Import: auto, Export: binary) Snapshot Options: --at BLOCK Take a snapshot at the given block, which may be an index, hash, or 'latest'. Note that taking snapshots at non-recent blocks will only work with --pruning archive - [default: latest] + (default: {flag_at}) --no-periodic-snapshot Disable automated snapshots which usually occur once - every 10000 blocks. + every 10000 blocks. (default: {flag_no_periodic_snapshot}) Virtual Machine Options: - --jitvm Enable the JIT VM. + --jitvm Enable the JIT VM. (default: {flag_jitvm}) Legacy Options: --geth Run in Geth-compatibility mode. Sets the IPC path @@ -284,156 +274,13 @@ Legacy Options: --cache MB Equivalent to --cache-size MB. Miscellaneous Options: + -c --config CONFIG Specify a filename containing a configuration file. + (default: {flag_config}) -l --logging LOGGING Specify the logging level. Must conform to the same - format as RUST_LOG. + format as RUST_LOG. (default: {flag_logging:?}) --log-file FILENAME Specify a filename into which logging should be - directed. - --no-color Don't use terminal color codes in output. + directed. (default: {flag_log_file:?}) + --no-config Don't load a configuration file. + --no-color Don't use terminal color codes in output. (default: {flag_no_color}) -v --version Show information about version. -h --help Show this screen. 
-"#; - -#[derive(Debug, PartialEq, RustcDecodable)] -pub struct Args { - pub cmd_daemon: bool, - pub cmd_account: bool, - pub cmd_wallet: bool, - pub cmd_new: bool, - pub cmd_list: bool, - pub cmd_export: bool, - pub cmd_import: bool, - pub cmd_signer: bool, - pub cmd_new_token: bool, - pub cmd_snapshot: bool, - pub cmd_restore: bool, - pub cmd_ui: bool, - pub arg_pid_file: String, - pub arg_file: Option, - pub arg_path: Vec, - pub flag_mode: String, - pub flag_mode_timeout: u64, - pub flag_mode_alarm: u64, - pub flag_chain: String, - pub flag_db_path: String, - pub flag_identity: String, - pub flag_unlock: Option, - pub flag_password: Vec, - pub flag_keys_path: String, - pub flag_keys_iterations: u32, - pub flag_import_geth_keys: bool, - pub flag_bootnodes: Option, - pub flag_network_id: Option, - pub flag_pruning: String, - pub flag_tracing: String, - pub flag_port: u16, - pub flag_min_peers: u16, - pub flag_max_peers: u16, - pub flag_no_discovery: bool, - pub flag_nat: String, - pub flag_node_key: Option, - pub flag_reserved_peers: Option, - pub flag_reserved_only: bool, - - pub flag_cache_size_db: u32, - pub flag_cache_size_blocks: u32, - pub flag_cache_size_queue: u32, - pub flag_cache_size: Option, - pub flag_cache: Option, - pub flag_fast_and_loose: bool, - - pub flag_no_jsonrpc: bool, - pub flag_jsonrpc_interface: String, - pub flag_jsonrpc_port: u16, - pub flag_jsonrpc_cors: Option, - pub flag_jsonrpc_hosts: String, - pub flag_jsonrpc_apis: String, - pub flag_no_ipc: bool, - pub flag_ipc_path: String, - pub flag_ipc_apis: String, - pub flag_no_dapps: bool, - pub flag_dapps_port: u16, - pub flag_dapps_interface: String, - pub flag_dapps_hosts: String, - pub flag_dapps_user: Option, - pub flag_dapps_pass: Option, - pub flag_dapps_path: String, - pub flag_force_signer: bool, - pub flag_no_signer: bool, - pub flag_signer_port: u16, - pub flag_signer_interface: String, - pub flag_signer_path: String, - pub flag_signer_no_validation: bool, - pub 
flag_force_sealing: bool, - pub flag_reseal_on_txs: String, - pub flag_reseal_min_period: u64, - pub flag_work_queue_size: usize, - pub flag_remove_solved: bool, - pub flag_tx_gas_limit: Option, - pub flag_relay_set: String, - pub flag_author: Option, - pub flag_usd_per_tx: String, - pub flag_usd_per_eth: String, - pub flag_price_update_period: String, - pub flag_gas_floor_target: String, - pub flag_gas_cap: String, - pub flag_extra_data: Option, - pub flag_tx_queue_size: usize, - pub flag_notify_work: Option, - pub flag_logging: Option, - pub flag_version: bool, - pub flag_from: String, - pub flag_to: String, - pub flag_at: String, - pub flag_no_periodic_snapshot: bool, - pub flag_format: Option, - pub flag_jitvm: bool, - pub flag_log_file: Option, - pub flag_no_color: bool, - pub flag_no_network: bool, - // legacy... - pub flag_geth: bool, - pub flag_nodekey: Option, - pub flag_nodiscover: bool, - pub flag_peers: Option, - pub flag_datadir: Option, - pub flag_extradata: Option, - pub flag_etherbase: Option, - pub flag_gasprice: Option, - pub flag_jsonrpc: bool, - pub flag_webapp: bool, - pub flag_rpc: bool, - pub flag_rpcaddr: Option, - pub flag_rpcport: Option, - pub flag_rpccorsdomain: Option, - pub flag_rpcapi: Option, - pub flag_testnet: bool, - pub flag_networkid: Option, - pub flag_ipcdisable: bool, - pub flag_ipc_off: bool, - pub flag_jsonrpc_off: bool, - pub flag_dapps_off: bool, - pub flag_ipcpath: Option, - pub flag_ipcapi: Option, - pub flag_db_compaction: String, - pub flag_fat_db: bool, -} - -impl Default for Args { - fn default() -> Self { - Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap() - } -} - -pub fn print_version() -> String { - format!("\ -Parity - version {} -Copyright 2015, 2016 Ethcore (UK) Limited -License GPLv3+: GNU GPL version 3 or later . -This is free software: you are free to change and redistribute it. -There is NO WARRANTY, to the extent permitted by law. 
- -By Wood/Paronyan/Kotewicz/Drwięga/Volf.\ -", version()) -} - diff --git a/parity/cli/version.txt b/parity/cli/version.txt new file mode 100644 index 000000000..acb7cd9e6 --- /dev/null +++ b/parity/cli/version.txt @@ -0,0 +1,9 @@ +Parity + version {} +Copyright 2015, 2016 Ethcore (UK) Limited +License GPLv3+: GNU GPL version 3 or later . +This is free software: you are free to change and redistribute it. +There is NO WARRANTY, to the extent permitted by law. + +By Wood/Paronyan/Kotewicz/Drwięga/Volf. + diff --git a/parity/configuration.rs b/parity/configuration.rs index 51d637580..1aa338c26 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -19,8 +19,7 @@ use std::io::Read; use std::net::SocketAddr; use std::path::PathBuf; use std::cmp::max; -use cli::{USAGE, Args}; -use docopt::{Docopt, Error as DocoptError}; +use cli::{Args, ArgsError}; use util::{Hashable, U256, Uint, Bytes, version_data, Secret, Address}; use util::log::Colour; use ethsync::{NetworkConfiguration, is_valid_node_url}; @@ -52,6 +51,7 @@ pub enum Cmd { Blockchain(BlockchainCmd), SignerToken(String), Snapshot(SnapshotCommand), + Hash(Option), } #[derive(Debug, PartialEq)] @@ -60,8 +60,8 @@ pub struct Configuration { } impl Configuration { - pub fn parse(command: I) -> Result where I: IntoIterator, S: AsRef { - let args = try!(Docopt::new(USAGE).and_then(|d| d.argv(command).decode())); + pub fn parse>(command: &[S]) -> Result { + let args = try!(Args::parse(command)); let config = Configuration { args: args, @@ -95,8 +95,10 @@ impl Configuration { let cmd = if self.args.flag_version { Cmd::Version - } else if self.args.cmd_signer { + } else if self.args.cmd_signer && self.args.cmd_new_token { Cmd::SignerToken(dirs.signer) + } else if self.args.cmd_tools && self.args.cmd_hash { + Cmd::Hash(self.args.arg_file) } else if self.args.cmd_account { let account_cmd = if self.args.cmd_new { let new_acc = NewAccount { @@ -628,8 +630,7 @@ impl Configuration { #[cfg(test)] mod tests { use 
super::*; - use cli::USAGE; - use docopt::Docopt; + use cli::Args; use ethcore_rpc::NetworkSettings; use ethcore::client::{VMType, BlockID}; use helpers::{replace_home, default_network_config}; @@ -647,21 +648,21 @@ mod tests { fn parse(args: &[&str]) -> Configuration { Configuration { - args: Docopt::new(USAGE).unwrap().argv(args).decode().unwrap(), + args: Args::parse_without_config(args).unwrap(), } } #[test] fn test_command_version() { let args = vec!["parity", "--version"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Version); } #[test] fn test_command_account_new() { let args = vec!["parity", "account", "new"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::New(NewAccount { iterations: 10240, path: replace_home("$HOME/.parity/keys"), @@ -672,7 +673,7 @@ mod tests { #[test] fn test_command_account_list() { let args = vec!["parity", "account", "list"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Account( AccountCmd::List(replace_home("$HOME/.parity/keys"))) ); @@ -681,7 +682,7 @@ mod tests { #[test] fn test_command_account_import() { let args = vec!["parity", "account", "import", "my_dir", "another_dir"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::Import(ImportAccounts { from: vec!["my_dir".into(), "another_dir".into()], to: replace_home("$HOME/.parity/keys"), @@ -691,7 +692,7 @@ mod tests { #[test] fn test_command_wallet_import() { let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::ImportPresaleWallet(ImportWallet { iterations: 10240, path: 
replace_home("$HOME/.parity/keys"), @@ -703,7 +704,7 @@ mod tests { #[test] fn test_command_blockchain_import() { let args = vec!["parity", "import", "blockchain.json"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain { spec: Default::default(), logger_config: Default::default(), @@ -723,7 +724,7 @@ mod tests { #[test] fn test_command_blockchain_export() { let args = vec!["parity", "export", "blockchain.json"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { spec: Default::default(), logger_config: Default::default(), @@ -744,7 +745,7 @@ mod tests { #[test] fn test_command_blockchain_export_with_custom_format() { let args = vec!["parity", "export", "--format", "hex", "blockchain.json"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { spec: Default::default(), logger_config: Default::default(), @@ -765,7 +766,7 @@ mod tests { #[test] fn test_command_signer_new_token() { let args = vec!["parity", "signer", "new-token"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); let expected = replace_home("$HOME/.parity/signer"); assert_eq!(conf.into_command().unwrap(), Cmd::SignerToken(expected)); } @@ -773,7 +774,7 @@ mod tests { #[test] fn test_run_cmd() { let args = vec!["parity"]; - let conf = Configuration::parse(args).unwrap(); + let conf = parse(&args); assert_eq!(conf.into_command().unwrap(), Cmd::Run(RunCmd { cache_config: Default::default(), dirs: Default::default(), @@ -962,7 +963,7 @@ mod tests { let filename = temp.as_str().to_owned() + "/peers"; File::create(filename.clone()).unwrap().write_all(b" \n\t\n").unwrap(); let args = vec!["parity", "--reserved-peers", 
&filename]; - let conf = Configuration::parse(args).unwrap(); + let conf = Configuration::parse(&args).unwrap(); assert!(conf.init_reserved_nodes().is_ok()); } } diff --git a/parity/dir.rs b/parity/dir.rs index d31e81e2c..158b5b2c5 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -52,32 +52,13 @@ impl Directories { Ok(()) } - /// Get the chain's root path. - pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { - let mut dir = Path::new(&self.db).to_path_buf(); - dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); - dir - } - - /// Get the root path for database - pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { - let mut dir = self.chain_path(genesis_hash, fork_name); - dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); - dir - } - - /// Get the path for the databases given the genesis_hash and information on the databases. - pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { - let mut dir = self.db_version_path(genesis_hash, fork_name, pruning); - dir.push("db"); - dir - } - - /// Get the path for the snapshot directory given the genesis hash and fork name. - pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { - let mut dir = self.chain_path(genesis_hash, fork_name); - dir.push("snapshot"); - dir + /// Database paths. 
+ pub fn database(&self, genesis_hash: H256, fork_name: Option) -> DatabaseDirectories { + DatabaseDirectories { + path: self.db.clone(), + genesis_hash: genesis_hash, + fork_name: fork_name, + } } /// Get the ipc sockets path @@ -88,6 +69,49 @@ impl Directories { } } +#[derive(Debug, PartialEq)] +pub struct DatabaseDirectories { + pub path: String, + pub genesis_hash: H256, + pub fork_name: Option, +} + +impl DatabaseDirectories { + fn fork_path(&self) -> PathBuf { + let mut dir = Path::new(&self.path).to_path_buf(); + dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default())); + dir + } + + /// Get the root path for database + pub fn version_path(&self, pruning: Algorithm) -> PathBuf { + let mut dir = self.fork_path(); + dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); + dir + } + + /// Get the path for the databases given the genesis_hash and information on the databases. + pub fn client_path(&self, pruning: Algorithm) -> PathBuf { + let mut dir = self.version_path(pruning); + dir.push("db"); + dir + } + + /// Get user defaults path + pub fn user_defaults_path(&self) -> PathBuf { + let mut dir = self.fork_path(); + dir.push("user_defaults"); + dir + } + + /// Get the path for the snapshot directory given the genesis hash and fork name. 
+ pub fn snapshot_path(&self) -> PathBuf { + let mut dir = self.fork_path(); + dir.push("snapshot"); + dir + } +} + #[cfg(test)] mod tests { use super::Directories; diff --git a/parity/helpers.rs b/parity/helpers.rs index 778dc1265..0649e7fe9 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -19,13 +19,12 @@ use std::io::{Write, Read, BufReader, BufRead}; use std::time::Duration; use std::path::Path; use std::fs::File; -use util::{clean_0x, U256, Uint, Address, path, H256, CompactionProfile}; +use util::{clean_0x, U256, Uint, Address, path, CompactionProfile}; use util::journaldb::Algorithm; -use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig}; +use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig}; use ethcore::miner::PendingSet; use cache::CacheConfig; -use dir::Directories; -use params::Pruning; +use dir::DatabaseDirectories; use upgrade::upgrade; use migration::migrate; use ethsync::is_valid_node_url; @@ -190,16 +189,13 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration { #[cfg_attr(feature = "dev", allow(too_many_arguments))] pub fn to_client_config( cache_config: &CacheConfig, - dirs: &Directories, - genesis_hash: H256, mode: Mode, - tracing: Switch, - pruning: Pruning, + tracing: bool, compaction: DatabaseCompactionProfile, wal: bool, vm_type: VMType, name: String, - fork_name: Option<&String>, + pruning: Algorithm, ) -> ClientConfig { let mut client_config = ClientConfig::default(); @@ -221,7 +217,7 @@ pub fn to_client_config( client_config.mode = mode; client_config.tracing.enabled = tracing; - client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name); + client_config.pruning = pruning; client_config.db_compaction = compaction; client_config.db_wal = wal; client_config.vm_type = vm_type; @@ -230,14 +226,12 @@ pub fn to_client_config( } pub fn execute_upgrades( - dirs: &Directories, - genesis_hash: H256, - fork_name: Option<&String>, + 
dirs: &DatabaseDirectories, pruning: Algorithm, compaction_profile: CompactionProfile ) -> Result<(), String> { - match upgrade(Some(&dirs.db)) { + match upgrade(Some(&dirs.path)) { Ok(upgrades_applied) if upgrades_applied > 0 => { debug!("Executed {} upgrade scripts - ok", upgrades_applied); }, @@ -247,7 +241,7 @@ pub fn execute_upgrades( _ => {}, } - let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning); + let client_path = dirs.version_path(pruning); migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e)) } diff --git a/parity/informant.rs b/parity/informant.rs index 58accd140..f2cc41f64 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -41,7 +41,9 @@ pub struct Informant { skipped: AtomicUsize, } -trait MillisecondDuration { +/// Something that can be converted to milliseconds. +pub trait MillisecondDuration { + /// Get the value in milliseconds. fn as_milliseconds(&self) -> u64; } diff --git a/parity/io_handler.rs b/parity/io_handler.rs index d60f80f9a..bf73f55bb 100644 --- a/parity/io_handler.rs +++ b/parity/io_handler.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use ethcore::client::Client; use ethcore::service::ClientIoMessage; use ethsync::{SyncProvider, ManageNetwork}; @@ -31,6 +32,7 @@ pub struct ClientIoHandler { pub net: Arc, pub accounts: Arc, pub info: Arc, + pub shutdown: Arc } impl IoHandler for ClientIoHandler { @@ -39,8 +41,24 @@ impl IoHandler for ClientIoHandler { } fn timeout(&self, _io: &IoContext, timer: TimerToken) { - if let INFO_TIMER = timer { + if timer == INFO_TIMER && !self.shutdown.load(Ordering::SeqCst) { self.info.tick(); } } } + +pub struct ImportIoHandler { + pub info: Arc, +} + +impl IoHandler for ImportIoHandler { + fn initialize(&self, io: &IoContext) { + io.register_timer(INFO_TIMER, 5000).expect("Error registering timer"); + } + + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if let INFO_TIMER = timer { + self.info.tick() + } + } +} \ No newline at end of file diff --git a/parity/main.rs b/parity/main.rs index 9c2ae7942..b74af7b3d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -39,6 +39,8 @@ extern crate semver; extern crate ethcore_io as io; extern crate ethcore_ipc as ipc; extern crate ethcore_ipc_nano as nanoipc; +extern crate serde; +extern crate serde_json; extern crate rlp; extern crate json_ipc_server as jsonipc; @@ -51,6 +53,7 @@ extern crate ansi_term; extern crate regex; extern crate isatty; +extern crate toml; #[macro_use] extern crate ethcore_util as util; @@ -105,22 +108,37 @@ mod run; mod sync; #[cfg(feature="ipc")] mod boot; +mod user_defaults; #[cfg(feature="stratum")] mod stratum; use std::{process, env}; -use cli::print_version; +use std::io::BufReader; +use std::fs::File; +use util::sha3::sha3; +use cli::Args; use configuration::{Cmd, Configuration}; use deprecated::find_deprecated; +fn print_hash_of(maybe_file: Option) -> Result { + if let Some(file) = maybe_file { + let mut f = BufReader::new(try!(File::open(&file).map_err(|_| "Unable to open file".to_owned()))); + let hash = 
try!(sha3(&mut f).map_err(|_| "Unable to read from file".to_owned())); + Ok(hash.hex()) + } else { + Err("Streaming from standard input not yet supported. Specify a file.".to_owned()) + } +} + fn execute(command: Cmd) -> Result { match command { Cmd::Run(run_cmd) => { try!(run::execute(run_cmd)); Ok("".into()) }, - Cmd::Version => Ok(print_version()), + Cmd::Version => Ok(Args::print_version()), + Cmd::Hash(maybe_file) => print_hash_of(maybe_file), Cmd::Account(account_cmd) => account::execute(account_cmd), Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd), Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd), @@ -130,7 +148,8 @@ fn execute(command: Cmd) -> Result { } fn start() -> Result { - let conf = Configuration::parse(env::args()).unwrap_or_else(|e| e.exit()); + let args: Vec = env::args().collect(); + let conf = Configuration::parse(&args).unwrap_or_else(|e| e.exit()); let deprecated = find_deprecated(&conf.args); for d in deprecated { diff --git a/parity/modules.rs b/parity/modules.rs index 73de6ca29..53cef4741 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -71,7 +71,7 @@ mod ipc_deps { pub use ethsync::{SyncClient, NetworkManagerClient, ServiceConfiguration}; pub use ethcore::client::ChainNotifyClient; pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL}; - pub use nanoipc::{GuardedSocket, NanoSocket, init_client}; + pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client}; pub use ipc::IpcSocket; pub use ipc::binary::serialize; } @@ -134,11 +134,11 @@ pub fn sync hypervisor.start(); hypervisor.wait_for_startup(); - let sync_client = init_client::>( + let sync_client = generic_client::>( &service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap(); - let notify_client = init_client::>( + let notify_client = generic_client::>( &service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap(); - let manage_client = init_client::>( + let 
manage_client = generic_client::>( &service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap(); *hypervisor_ref = Some(hypervisor); diff --git a/parity/params.rs b/parity/params.rs index c67520aa1..71f702cfb 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -14,15 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::str::FromStr; -use std::fs; +use std::{str, fs}; use std::time::Duration; -use util::{H256, Address, U256, version_data}; +use util::{Address, U256, version_data}; use util::journaldb::Algorithm; use ethcore::spec::Spec; use ethcore::ethereum; use ethcore::miner::{GasPricer, GasPriceCalibratorOptions}; -use dir::Directories; +use user_defaults::UserDefaults; #[derive(Debug, PartialEq)] pub enum SpecType { @@ -39,7 +38,7 @@ impl Default for SpecType { } } -impl FromStr for SpecType { +impl str::FromStr for SpecType { type Err = String; fn from_str(s: &str) -> Result { @@ -81,7 +80,7 @@ impl Default for Pruning { } } -impl FromStr for Pruning { +impl str::FromStr for Pruning { type Err = String; fn from_str(s: &str) -> Result { @@ -93,24 +92,12 @@ impl FromStr for Pruning { } impl Pruning { - pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { + pub fn to_algorithm(&self, user_defaults: &UserDefaults) -> Algorithm { match *self { Pruning::Specific(algo) => algo, - Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name), + Pruning::Auto => user_defaults.pruning, } } - - fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { - let mut algo_types = Algorithm::all_types(); - // if all dbs have the same modification time, the last element is the default one - algo_types.push(Algorithm::default()); - - algo_types.into_iter().max_by_key(|i| { - let mut client_path = dirs.client_path(genesis_hash, fork_name, *i); - client_path.push("CURRENT"); - 
fs::metadata(&client_path).and_then(|m| m.modified()).ok() - }).unwrap() - } } #[derive(Debug, PartialEq)] @@ -128,7 +115,7 @@ impl Default for ResealPolicy { } } -impl FromStr for ResealPolicy { +impl str::FromStr for ResealPolicy { type Err = String; fn from_str(s: &str) -> Result { @@ -223,10 +210,50 @@ impl Default for MinerExtras { } } +/// 3-value enum. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum Switch { + /// True. + On, + /// False. + Off, + /// Auto. + Auto, +} + +impl Default for Switch { + fn default() -> Self { + Switch::Auto + } +} + +impl str::FromStr for Switch { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "on" => Ok(Switch::On), + "off" => Ok(Switch::Off), + "auto" => Ok(Switch::Auto), + other => Err(format!("Invalid switch value: {}", other)) + } + } +} + +pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> Result { + match (user_defaults.is_first_launch, switch, user_defaults.tracing) { + (false, Switch::On, false) => Err("TraceDB resync required".into()), + (_, Switch::On, _) => Ok(true), + (_, Switch::Off, _) => Ok(false), + (_, Switch::Auto, def) => Ok(def), + } +} + #[cfg(test)] mod tests { use util::journaldb::Algorithm; - use super::{SpecType, Pruning, ResealPolicy}; + use user_defaults::UserDefaults; + use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool}; #[test] fn test_spec_type_parsing() { @@ -274,4 +301,36 @@ mod tests { let all = ResealPolicy { own: true, external: true }; assert_eq!(all, ResealPolicy::default()); } + + #[test] + fn test_switch_parsing() { + assert_eq!(Switch::On, "on".parse().unwrap()); + assert_eq!(Switch::Off, "off".parse().unwrap()); + assert_eq!(Switch::Auto, "auto".parse().unwrap()); + } + + #[test] + fn test_switch_default() { + assert_eq!(Switch::default(), Switch::Auto); + } + + fn user_defaults_with_tracing(first_launch: bool, tracing: bool) -> UserDefaults { + let mut ud = UserDefaults::default(); + ud.is_first_launch = 
first_launch; + ud.tracing = tracing; + ud + } + + #[test] + fn test_switch_to_bool() { + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, true)).unwrap()); + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, false)).unwrap()); + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, true)).unwrap()); + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, false)).unwrap()); + + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, true)).unwrap()); + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, false)).unwrap()); + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, true)).unwrap()); + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err()); + } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 624c6b3f4..29b33b844 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -25,7 +25,7 @@ use ethcore::client::Client; use ethcore::account_provider::AccountProvider; use ethsync::{ManageNetwork, SyncProvider}; use ethcore_rpc::{Extendable, NetworkSettings}; -pub use ethcore_rpc::ConfirmationsQueue; +pub use ethcore_rpc::SignerService; #[derive(Debug, PartialEq, Clone, Eq, Hash)] @@ -94,7 +94,7 @@ impl FromStr for ApiSet { pub struct Dependencies { pub signer_port: Option, - pub signer_queue: Arc, + pub signer_service: Arc, pub client: Arc, pub sync: Arc, pub net: Arc, @@ -173,7 +173,7 @@ pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet server.add_delegate(filter_client.to_delegate()); if deps.signer_port.is_some() { - server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); + server.add_delegate(EthSigningQueueClient::new(&deps.signer_service, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); } else { 
server.add_delegate(EthSigningUnsafeClient::new(&deps.client, &deps.secret_store, &deps.miner).to_delegate()); } @@ -182,11 +182,11 @@ pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet server.add_delegate(PersonalClient::new(&deps.secret_store, &deps.client, &deps.miner, deps.signer_port, deps.geth_compatibility).to_delegate()); }, Api::Signer => { - server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_queue).to_delegate()); + server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_service).to_delegate()); }, Api::Ethcore => { - let queue = deps.signer_port.map(|_| deps.signer_queue.clone()); - server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, &deps.sync, &deps.net_service, deps.logger.clone(), deps.settings.clone(), queue).to_delegate()) + let signer = deps.signer_port.map(|_| deps.signer_service.clone()); + server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, &deps.sync, &deps.net_service, deps.logger.clone(), deps.settings.clone(), signer).to_delegate()) }, Api::EthcoreSet => { server.add_delegate(EthcoreSetClient::new(&deps.client, &deps.miner, &deps.net_service).to_delegate()) diff --git a/parity/run.rs b/parity/run.rs index 720e6f1bf..e95b5c9f5 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -23,7 +23,7 @@ use ethcore_rpc::NetworkSettings; use ethsync::NetworkConfiguration; use util::{Colour, version, U256}; use io::{MayPanic, ForwardPanic, PanicHandler}; -use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNotify}; +use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, ChainNotify}; use ethcore::service::ClientService; use ethcore::account_provider::AccountProvider; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; @@ -35,10 +35,11 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; use signer::SignerServer; use dapps::WebappServer; use io_handler::ClientIoHandler; 
-use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras}; +use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool}; use helpers::{to_client_config, execute_upgrades, passwords_from_files}; use dir::Directories; use cache::CacheConfig; +use user_defaults::UserDefaults; use dapps; use signer; use modules; @@ -87,34 +88,45 @@ pub struct RunCmd { } pub fn execute(cmd: RunCmd) -> Result<(), String> { - // increase max number of open files - raise_fd_limit(); + // set up panic handler + let panic_handler = PanicHandler::new_in_arc(); // set up logger let logger = try!(setup_log(&cmd.logger_config)); - // set up panic handler - let panic_handler = PanicHandler::new_in_arc(); + // increase max number of open files + raise_fd_limit(); // create dirs used by parity try!(cmd.dirs.create_dirs()); // load spec let spec = try!(cmd.spec.spec()); - let fork_name = spec.fork_name.clone(); // load genesis hash let genesis_hash = spec.genesis_header().hash(); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref()); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); // prepare client and snapshot paths. 
- let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm); - let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // run in daemon mode if let Some(pid_file) = cmd.daemon { @@ -152,16 +164,13 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // create client config let client_config = to_client_config( &cmd.cache_config, - &cmd.dirs, - genesis_hash, cmd.mode, - cmd.tracing, - cmd.pruning, + tracing, cmd.compaction, cmd.wal, cmd.vm_type, cmd.name, - fork_name.as_ref(), + algorithm, ); // set up bootnodes @@ -206,9 +215,10 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { } // set up dependencies for rpc servers + let signer_path = cmd.signer_conf.signer_path.clone(); let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { signer_port: cmd.signer_port, - signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()), + signer_service: Arc::new(rpc_apis::SignerService::new(move || signer::new_token(signer_path.clone()))), client: client.clone(), sync: sync_provider.clone(), net: manage_network.clone(), @@ -257,8 +267,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { sync: sync_provider.clone(), net: manage_network.clone(), accounts: account_provider.clone(), + shutdown: Default::default(), }); - service.register_io_handler(io_handler).expect("Error registering IO handler"); + service.register_io_handler(io_handler.clone()).expect("Error registering IO handler"); // the watcher must be kept alive. 
let _watcher = match cmd.no_periodic_snapshot { @@ -286,9 +297,19 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port)); } + // save user defaults + user_defaults.pruning = algorithm; + user_defaults.tracing = tracing; + try!(user_defaults.save(&user_defaults_path)); + // Handle exit wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server); + // to make sure timer does not spawn requests while shutdown is in progress + io_handler.shutdown.store(true, ::std::sync::atomic::Ordering::SeqCst); + // just Arc is dropping here, to allow other reference release in its default time + drop(io_handler); + // hypervisor should be shutdown first while everything still works and can be // terminated gracefully drop(hypervisor); diff --git a/parity/signer.rs b/parity/signer.rs index e6924dcef..b60bc7211 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -90,7 +90,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result(snapshot: Arc, reader: &R, recover: bool) -> Result<(), String> { + let manifest = reader.manifest(); + + info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash); + + try!(snapshot.init_restore(manifest.clone(), recover).map_err(|e| { + format!("Failed to begin restoration: {}", e) + })); + + let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); + + let informant_handle = snapshot.clone(); + ::std::thread::spawn(move || { + while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() { + info!("Processed {}/{} state chunks and {}/{} block chunks.", + state_chunks_done, num_state, block_chunks_done, num_blocks); + ::std::thread::sleep(Duration::from_secs(5)); + } + }); + + info!("Restoring state"); + for &state_hash in &manifest.state_hashes { + if snapshot.status() == RestorationStatus::Failed { + return Err("Restoration 
failed".into()); + } + + let chunk = try!(reader.chunk(state_hash) + .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))); + snapshot.feed_state_chunk(state_hash, &chunk); + } + + info!("Restoring blocks"); + for &block_hash in &manifest.block_hashes { + if snapshot.status() == RestorationStatus::Failed { + return Err("Restoration failed".into()); + } + + let chunk = try!(reader.chunk(block_hash) + .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))); + snapshot.feed_block_chunk(block_hash, &chunk); + } + + match snapshot.status() { + RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()), + RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), + RestorationStatus::Inactive => { + info!("Restoration complete."); + Ok(()) + } + } +} + impl SnapshotCommand { // shared portion of snapshot commands: start the client service fn start_service(self) -> Result<(ClientService, Arc), String> { @@ -74,23 +130,35 @@ impl SnapshotCommand { // load genesis hash let genesis_hash = spec.genesis_header().hash(); + // database paths + let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults)); + // Setup logging let _logger = setup_log(&self.logger_config); fdlimit::raise_fd_limit(); // select pruning algorithm - let algorithm = self.pruning.to_algorithm(&self.dirs, genesis_hash, spec.fork_name.as_ref()); + let algorithm = self.pruning.to_algorithm(&user_defaults); // prepare client and snapshot paths. 
- let client_path = self.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); - let snapshot_path = self.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&self.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, self.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&self.cache_config, &self.dirs, genesis_hash, self.mode, self.tracing, self.pruning, self.compaction, self.wal, VMType::default(), "".into(), spec.fork_name.as_ref()); + let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, @@ -106,69 +174,35 @@ impl SnapshotCommand { /// restore from a snapshot pub fn restore(self) -> Result<(), String> { - let file = try!(self.file_path.clone().ok_or("No file path provided.".to_owned())); + let file = self.file_path.clone(); let (service, _panic_handler) = try!(self.start_service()); warn!("Snapshot restoration is experimental and the format may be subject to change."); warn!("On encountering an unexpected error, please ensure that you have a recent snapshot."); let snapshot = service.snapshot_service(); - let reader = PackedReader::new(Path::new(&file)) - .map_err(|e| format!("Couldn't open snapshot file: {}", e)) - .and_then(|x| x.ok_or("Snapshot file has invalid format.".into())); - let reader = try!(reader); - let manifest = reader.manifest(); + if let Some(file) = file { + info!("Attempting to restore from snapshot at '{}'", file); - // drop the client so we don't restore while it has open DB handles. 
- drop(service); + let reader = PackedReader::new(Path::new(&file)) + .map_err(|e| format!("Couldn't open snapshot file: {}", e)) + .and_then(|x| x.ok_or("Snapshot file has invalid format.".into())); - try!(snapshot.init_restore(manifest.clone()).map_err(|e| { - format!("Failed to begin restoration: {}", e) - })); + let reader = try!(reader); + try!(restore_using(snapshot, &reader, true)); + } else { + info!("Attempting to restore from local snapshot."); - let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); - - let informant_handle = snapshot.clone(); - ::std::thread::spawn(move || { - while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() { - info!("Processed {}/{} state chunks and {}/{} block chunks.", - state_chunks_done, num_state, block_chunks_done, num_blocks); - - ::std::thread::sleep(Duration::from_secs(5)); - } - }); - - info!("Restoring state"); - for &state_hash in &manifest.state_hashes { - if snapshot.status() == RestorationStatus::Failed { - return Err("Restoration failed".into()); - } - - let chunk = try!(reader.chunk(state_hash) - .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))); - snapshot.feed_state_chunk(state_hash, &chunk); - } - - info!("Restoring blocks"); - for &block_hash in &manifest.block_hashes { - if snapshot.status() == RestorationStatus::Failed { - return Err("Restoration failed".into()); - } - - let chunk = try!(reader.chunk(block_hash) - .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))); - snapshot.feed_block_chunk(block_hash, &chunk); - } - - match snapshot.status() { - RestorationStatus::Ongoing { .. 
} => Err("Snapshot file is incomplete and missing chunks.".into()), - RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), - RestorationStatus::Inactive => { - info!("Restoration complete."); - Ok(()) + // attempting restoration with recovery will lead to deadlock + // as we currently hold a read lock on the service's reader. + match *snapshot.reader() { + Some(ref reader) => try!(restore_using(snapshot.clone(), reader, false)), + None => return Err("No local snapshot found.".into()), } } + + Ok(()) } /// Take a snapshot from the head of the chain. diff --git a/parity/user_defaults.rs b/parity/user_defaults.rs new file mode 100644 index 000000000..8a1feebae --- /dev/null +++ b/parity/user_defaults.rs @@ -0,0 +1,98 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::fs::File; +use std::io::Write; +use std::path::Path; +use std::collections::BTreeMap; +use serde::{Serialize, Serializer, Error, Deserialize, Deserializer}; +use serde::de::{Visitor, MapVisitor}; +use serde::de::impls::BTreeMapVisitor; +use serde_json::Value; +use serde_json::de::from_reader; +use serde_json::ser::to_string; +use util::journaldb::Algorithm; + +pub struct UserDefaults { + pub is_first_launch: bool, + pub pruning: Algorithm, + pub tracing: bool, +} + +impl Serialize for UserDefaults { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: Serializer { + let mut map: BTreeMap = BTreeMap::new(); + map.insert("pruning".into(), Value::String(self.pruning.as_str().into())); + map.insert("tracing".into(), Value::Bool(self.tracing)); + map.serialize(serializer) + } +} + +struct UserDefaultsVisitor; + +impl Deserialize for UserDefaults { + fn deserialize(deserializer: &mut D) -> Result + where D: Deserializer { + deserializer.deserialize(UserDefaultsVisitor) + } +} + +impl Visitor for UserDefaultsVisitor { + type Value = UserDefaults; + + fn visit_map(&mut self, visitor: V) -> Result + where V: MapVisitor { + let mut map: BTreeMap = try!(BTreeMapVisitor::new().visit_map(visitor)); + let pruning: Value = try!(map.remove("pruning".into()).ok_or_else(|| Error::custom("missing pruning"))); + let pruning = try!(pruning.as_str().ok_or_else(|| Error::custom("invalid pruning value"))); + let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method"))); + let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing"))); + let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value"))); + + let user_defaults = UserDefaults { + is_first_launch: false, + pruning: pruning, + tracing: tracing, + }; + + Ok(user_defaults) + } +} + +impl Default for UserDefaults { + fn default() -> Self { + UserDefaults { + is_first_launch: true, + pruning: 
Algorithm::default(), + tracing: false, + } + } +} + +impl UserDefaults { + pub fn load

(path: P) -> Result where P: AsRef { + match File::open(path) { + Ok(file) => from_reader(file).map_err(|e| e.to_string()), + _ => Ok(UserDefaults::default()), + } + } + + pub fn save

(self, path: P) -> Result<(), String> where P: AsRef { + let mut file: File = try!(File::create(path).map_err(|_| "Cannot create user defaults file".to_owned())); + file.write_all(to_string(&self).unwrap().as_bytes()).map_err(|_| "Failed to save user defaults".to_owned()) + } +} diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 8bb16007f..c3f9cddbd 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -17,6 +17,7 @@ jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc-http-server.gi ethcore-io = { path = "../util/io" } ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } +ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } ethstore = { path = "../ethstore" } ethash = { path = "../ethash" } @@ -27,7 +28,7 @@ rlp = { path = "../util/rlp" } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.8.0", optional = true } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } ethcore-ipc = { path = "../ipc/rpc" } time = "0.1" diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 17d1837ae..7f2f11400 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -28,6 +28,7 @@ extern crate jsonrpc_http_server; extern crate ethcore_io as io; extern crate ethcore; extern crate ethkey; +extern crate ethcrypto as crypto; extern crate ethstore; extern crate ethsync; extern crate transient_hashmap; @@ -53,7 +54,7 @@ use self::jsonrpc_core::{IoHandler, IoDelegate}; pub use jsonrpc_http_server::{ServerBuilder, Server, RpcServerError}; pub mod v1; -pub use v1::{SigningQueue, ConfirmationsQueue, NetworkSettings}; +pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings}; /// An object that can be extended with `IoDelegates` pub trait Extendable { diff --git a/rpc/src/v1/helpers/auto_args.rs b/rpc/src/v1/helpers/auto_args.rs new file mode 100644 index 000000000..c7deb0436 --- /dev/null 
+++ b/rpc/src/v1/helpers/auto_args.rs @@ -0,0 +1,171 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Automatically serialize and deserialize parameters around a strongly-typed function. + +// because we reuse the type names as idents in the macros as a dirty hack to +// work around `concat_idents!` being unstable. +#![allow(non_snake_case)] + +use super::errors; + +use jsonrpc_core::{Error, Params, Value, from_params, to_value}; +use serde::{Serialize, Deserialize}; + +/// Auto-generates an RPC trait from trait definition. +/// +/// This just copies out all the methods, docs, and adds another +/// function `to_delegate` which will automatically wrap each strongly-typed +/// function in a wrapper which handles parameter and output type serialization. +/// +/// Every function must have a `#[name("rpc_nameHere")]` attribute after +/// its documentation, and no other attributes. All function names are +/// allowed except for `to_delegate`, which is auto-generated. +macro_rules! 
build_rpc_trait { + ( + $(#[$t_attr: meta])* + pub trait $name: ident { + $( + $(#[doc=$m_doc: expr])* #[name($rpc_name: expr)] + fn $method: ident (&self $(, $param: ty)*) -> $out: ty; + )* + } + ) => { + $(#[$t_attr])* + pub trait $name: Sized + Send + Sync + 'static { + $( + $(#[doc=$m_doc])* + fn $method(&self $(, $param)*) -> $out; + )* + + /// Transform this into an `IoDelegate`, automatically wrapping + /// the parameters. + fn to_delegate(self) -> ::jsonrpc_core::IoDelegate { + let mut del = ::jsonrpc_core::IoDelegate::new(self.into()); + $( + del.add_method($rpc_name, move |base, params| { + ($name::$method as fn(&_ $(, $param)*) -> $out).wrap_rpc(base, params) + }); + )* + del + } + } + } +} + +/// A wrapper type without an implementation of `Deserialize` +/// which allows a special implementation of `Wrap` for functions +/// that take a trailing default parameter. +pub struct Trailing(pub T); + +/// Wrapper trait for synchronous RPC functions. +pub trait Wrap { + fn wrap_rpc(&self, base: &B, params: Params) -> Result; +} + +// special impl for no parameters. +impl Wrap for fn(&B) -> Result + where B: Send + Sync + 'static, OUT: Serialize +{ + fn wrap_rpc(&self, base: &B, params: Params) -> Result { + ::v1::helpers::params::expect_no_params(params) + .and_then(|()| (self)(base)) + .map(to_value) + } +} + +// creates a wrapper implementation which deserializes the parameters, +// calls the function with concrete type, and serializes the output. +macro_rules! wrap { + ($($x: ident),+) => { + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + > Wrap for fn(&BASE, $($x,)+) -> Result { + fn wrap_rpc(&self, base: &BASE, params: Params) -> Result { + from_params::<($($x,)+)>(params).and_then(|($($x,)+)| { + (self)(base, $($x,)+) + }).map(to_value) + } + } + } +} + +// special impl for no parameters other than block parameter. 
+impl Wrap for fn(&B, Trailing) -> Result + where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize +{ + fn wrap_rpc(&self, base: &B, params: Params) -> Result { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return Err(errors::invalid_params("not an array", "")), + }; + + let (id,) = match len { + 0 => (T::default(),), + 1 => try!(from_params::<(T,)>(params)), + _ => return Err(Error::invalid_params()), + }; + + (self)(base, Trailing(id)).map(to_value) + } +} + +// similar to `wrap!`, but handles a single default trailing parameter +// accepts an additional argument indicating the number of non-trailing parameters. +macro_rules! wrap_with_trailing { + ($num: expr, $($x: ident),+) => { + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + TRAILING: Default + Deserialize, + > Wrap for fn(&BASE, $($x,)+ Trailing) -> Result { + fn wrap_rpc(&self, base: &BASE, params: Params) -> Result { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return Err(errors::invalid_params("not an array", "")), + }; + + let params = match len - $num { + 0 => from_params::<($($x,)+)>(params) + .map(|($($x,)+)| ($($x,)+ TRAILING::default())), + 1 => from_params::<($($x,)+ TRAILING)>(params) + .map(|($($x,)+ id)| ($($x,)+ id)), + _ => Err(Error::invalid_params()), + }; + + let ($($x,)+ id) = try!(params); + (self)(base, $($x,)+ Trailing(id)).map(to_value) + } + } + } +} + +wrap!(A, B, C, D, E); +wrap!(A, B, C, D); +wrap!(A, B, C); +wrap!(A, B); +wrap!(A); + +wrap_with_trailing!(5, A, B, C, D, E); +wrap_with_trailing!(4, A, B, C, D); +wrap_with_trailing!(3, A, B, C); +wrap_with_trailing!(2, A, B); +wrap_with_trailing!(1, A); \ No newline at end of file diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 5587673d8..df2d8cbd3 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -41,7 +41,7 @@ fn 
prepare_transaction(client: &C, miner: &M, request: TransactionRequest) } } -pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result +pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result where C: MiningBlockChainClient, M: MinerService { let hash = RpcH256::from(signed_transaction.hash()); @@ -49,7 +49,7 @@ pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: Sig import .map_err(errors::from_transaction_error) - .map(|_| to_value(&hash)) + .map(|_| hash) } pub fn signature_with_password(accounts: &AccountProvider, address: Address, hash: H256, pass: String) -> Result { @@ -70,7 +70,7 @@ pub fn unlock_sign_and_dispatch(client: &C, miner: &M, request: Transactio }; trace!(target: "miner", "send_transaction: dispatching tx: {}", ::rlp::encode(&signed_transaction).to_vec().pretty()); - dispatch_transaction(&*client, &*miner, signed_transaction) + dispatch_transaction(&*client, &*miner, signed_transaction).map(to_value) } pub fn sign_and_dispatch(client: &C, miner: &M, request: TransactionRequest, account_provider: &AccountProvider, address: Address) -> Result @@ -84,7 +84,7 @@ pub fn sign_and_dispatch(client: &C, miner: &M, request: TransactionReques }; trace!(target: "miner", "send_transaction: dispatching tx: {}", ::rlp::encode(&signed_transaction).to_vec().pretty()); - dispatch_transaction(&*client, &*miner, signed_transaction) + dispatch_transaction(&*client, &*miner, signed_transaction).map(to_value) } pub fn default_gas_price(client: &C, miner: &M) -> U256 where C: MiningBlockChainClient, M: MinerService { diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 8a11f2466..18e369208 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -139,6 +139,13 @@ pub fn no_author() -> Error { } } +pub fn token(e: String) -> Error { + Error { + code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), + message: "There was an 
error when saving your authorization tokens.".into(), + data: Some(Value::String(e)), + } +} pub fn signer_disabled() -> Error { Error { diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index d71eaac41..e6ada3379 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -14,18 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +#[macro_use] +pub mod auto_args; + #[macro_use] pub mod errors; + pub mod dispatch; pub mod params; + mod poll_manager; mod poll_filter; mod requests; +mod signer; mod signing_queue; mod network_settings; pub use self::poll_manager::PollManager; -pub use self::poll_filter::PollFilter; +pub use self::poll_filter::{PollFilter, limit_logs}; pub use self::requests::{TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest}; pub use self::signing_queue::{ConfirmationsQueue, ConfirmationPromise, ConfirmationResult, SigningQueue, QueueEvent}; +pub use self::signer::SignerService; pub use self::network_settings::NetworkSettings; diff --git a/rpc/src/v1/helpers/params.rs b/rpc/src/v1/helpers/params.rs index c38529e4e..f56c500fc 100644 --- a/rpc/src/v1/helpers/params.rs +++ b/rpc/src/v1/helpers/params.rs @@ -28,21 +28,14 @@ pub fn expect_no_params(params: Params) -> Result<(), Error> { } } -fn params_len(params: &Params) -> usize { +/// Returns number of different parameters in given `Params` object. +pub fn params_len(params: &Params) -> usize { match params { &Params::Array(ref vec) => vec.len(), _ => 0, } } -/// Deserialize request parameters with optional second parameter `BlockNumber` defaulting to `BlockNumber::Latest`. 
-pub fn from_params_default_second(params: Params) -> Result<(F, BlockNumber, ), Error> where F: serde::de::Deserialize { - match params_len(¶ms) { - 1 => from_params::<(F, )>(params).map(|(f,)| (f, BlockNumber::Latest)), - _ => from_params::<(F, BlockNumber)>(params), - } -} - /// Deserialize request parameters with optional third parameter `BlockNumber` defaulting to `BlockNumber::Latest`. pub fn from_params_default_third(params: Params) -> Result<(F1, F2, BlockNumber, ), Error> where F1: serde::de::Deserialize, F2: serde::de::Deserialize { match params_len(¶ms) { diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 31bbf47fe..faae75c98 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -13,6 +13,15 @@ pub enum PollFilter { Block(BlockNumber), /// Hashes of all transactions which client was notified about. PendingTransaction(Vec), - /// Number of From block number, pending logs and log filter iself. + /// Number of From block number, pending logs and log filter itself. Logs(BlockNumber, HashSet, Filter) } + +/// Returns only last `n` logs +pub fn limit_logs(mut logs: Vec, limit: Option) -> Vec { + let len = logs.len(); + match limit { + Some(limit) if len >= limit => logs.split_off(len - limit), + _ => logs, + } +} diff --git a/rpc/src/v1/helpers/signer.rs b/rpc/src/v1/helpers/signer.rs new file mode 100644 index 000000000..2cebc8261 --- /dev/null +++ b/rpc/src/v1/helpers/signer.rs @@ -0,0 +1,61 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::ops::Deref; +use v1::helpers::signing_queue::{ConfirmationsQueue}; + +/// Manages communication with Signer crate +pub struct SignerService { + queue: Arc, + generate_new_token: Box Result + Send + Sync + 'static>, +} + +impl SignerService { + + /// Creates new Signer Service given function to generate new tokens. + pub fn new(new_token: F) -> Self + where F: Fn() -> Result + Send + Sync + 'static { + SignerService { + queue: Arc::new(ConfirmationsQueue::default()), + generate_new_token: Box::new(new_token), + } + } + + /// Generates new token. + pub fn generate_token(&self) -> Result { + (self.generate_new_token)() + } + + /// Returns a reference to `ConfirmationsQueue` + pub fn queue(&self) -> Arc { + self.queue.clone() + } + + #[cfg(test)] + /// Creates new Signer Service for tests. 
+ pub fn new_test() -> Self { + SignerService::new(|| Ok("new_token".into())) + } +} + +impl Deref for SignerService { + type Target = ConfirmationsQueue; + fn deref(&self) -> &Self::Target { + &self.queue + } +} + diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 9487f020d..755539ebd 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -42,10 +42,14 @@ use ethcore::log_entry::LogEntry; use ethcore::filter::Filter as EthcoreFilter; use self::ethash::SeedHashCompute; use v1::traits::Eth; -use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256}; -use v1::helpers::{CallRequest as CRequest, errors}; +use v1::types::{ + Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, + Transaction, CallRequest, Index, Filter, Log, Receipt, Work, + H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256, +}; +use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; use v1::helpers::dispatch::{default_gas_price, dispatch_transaction}; -use v1::helpers::params::{expect_no_params, from_params_default_second, from_params_default_third}; +use v1::helpers::auto_args::Trailing; /// Eth RPC options pub struct EthClientOptions { @@ -100,7 +104,7 @@ impl EthClient where } } - fn block(&self, id: BlockID, include_txs: bool) -> Result { + fn block(&self, id: BlockID, include_txs: bool) -> Result, Error> { let client = take_weak!(self.client); match (client.block(id.clone()), client.block_total_difficulty(id)) { (Some(bytes), Some(total_difficulty)) => { @@ -131,28 +135,28 @@ impl EthClient where }, extra_data: Bytes::new(view.extra_data()) }; - Ok(to_value(&block)) + Ok(Some(block)) }, - _ => Ok(Value::Null) + _ => Ok(None) } } - fn transaction(&self, id: TransactionID) -> Result { + fn transaction(&self, id: TransactionID) -> Result, Error> { match 
take_weak!(self.client).transaction(id) { - Some(t) => Ok(to_value(&Transaction::from(t))), - None => Ok(Value::Null) + Some(t) => Ok(Some(Transaction::from(t))), + None => Ok(None), } } - fn uncle(&self, id: UncleID) -> Result { + fn uncle(&self, id: UncleID) -> Result, Error> { let client = take_weak!(self.client); let uncle: BlockHeader = match client.uncle(id) { Some(rlp) => rlp::decode(&rlp), - None => { return Ok(Value::Null); } + None => { return Ok(None); } }; let parent_difficulty = match client.block_total_difficulty(BlockID::Hash(uncle.parent_hash().clone())) { Some(difficulty) => difficulty, - None => { return Ok(Value::Null); } + None => { return Ok(None); } }; let block = Block { @@ -177,7 +181,7 @@ impl EthClient where uncles: vec![], transactions: BlockTransactions::Hashes(vec![]), }; - Ok(to_value(&block)) + Ok(Some(block)) } fn sign_call(&self, request: CRequest) -> Result { @@ -240,20 +244,19 @@ impl Eth for EthClient where M: MinerService + 'static, EM: ExternalMinerService + 'static { - fn protocol_version(&self, params: Params) -> Result { + fn protocol_version(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())) + let version = take_weak!(self.sync).status().protocol_version.to_owned(); + Ok(format!("{}", version)) } - fn syncing(&self, params: Params) -> Result { + fn syncing(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let status = take_weak!(self.sync).status(); - let res = match status.state { - SyncState::Idle => SyncStatus::None, + match status.state { + SyncState::Idle => Ok(SyncStatus::None), SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead | SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => { let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number); @@ -265,260 +268,242 @@ impl Eth for 
EthClient where current_block: current_block.into(), highest_block: highest_block.into(), }; - SyncStatus::Info(info) + Ok(SyncStatus::Info(info)) } else { - SyncStatus::None + Ok(SyncStatus::None) } } - }; - Ok(to_value(&res)) + } } - fn author(&self, params: Params) -> Result { + fn author(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&RpcH160::from(take_weak!(self.miner).author()))) + Ok(RpcH160::from(take_weak!(self.miner).author())) } - fn is_mining(&self, params: Params) -> Result { + fn is_mining(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&(take_weak!(self.miner).is_sealing()))) + Ok(take_weak!(self.miner).is_sealing()) } - fn hashrate(&self, params: Params) -> Result { + fn hashrate(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&RpcU256::from(self.external_miner.hashrate()))) + Ok(RpcU256::from(self.external_miner.hashrate())) } - fn gas_price(&self, params: Params) -> Result { + fn gas_price(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let (client, miner) = (take_weak!(self.client), take_weak!(self.miner)); - Ok(to_value(&RpcU256::from(default_gas_price(&*client, &*miner)))) + Ok(RpcU256::from(default_gas_price(&*client, &*miner))) } - fn accounts(&self, params: Params) -> Result { + fn accounts(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); let store = take_weak!(self.accounts); let accounts = try!(store.accounts().map_err(|e| errors::internal("Could not fetch accounts.", e))); - Ok(to_value(&accounts.into_iter().map(Into::into).collect::>())) + Ok(accounts.into_iter().map(Into::into).collect()) } - fn block_number(&self, params: Params) -> Result { + fn block_number(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&RpcU256::from(take_weak!(self.client).chain_info().best_block_number))) + 
Ok(RpcU256::from(take_weak!(self.client).chain_info().best_block_number)) } - fn balance(&self, params: Params) -> Result { + fn balance(&self, address: RpcH160, num: Trailing) -> Result { try!(self.active()); - from_params_default_second(params) - .and_then(|(address, block_number,)| { - let address: Address = RpcH160::into(address); - match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).balance(&*take_weak!(self.client), &address)))), - id => match take_weak!(self.client).balance(&address, id.into()) { - Some(balance) => Ok(to_value(&RpcU256::from(balance))), - None => Err(errors::state_pruned()), - } - } - }) + + let address = address.into(); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).balance(&*take_weak!(self.client), &address).into()), + id => match take_weak!(self.client).balance(&address, id.into()) { + Some(balance) => Ok(balance.into()), + None => Err(errors::state_pruned()), + } + } } - fn storage_at(&self, params: Params) -> Result { + fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing) -> Result { try!(self.active()); - from_params_default_third::(params) - .and_then(|(address, position, block_number,)| { - let address: Address = RpcH160::into(address); - let position: U256 = RpcU256::into(position); - match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position))))), - id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) { - Some(s) => Ok(to_value(&RpcH256::from(s))), - None => Err(errors::state_pruned()), - } - } - }) - + let address: Address = RpcH160::into(address); + let position: U256 = RpcU256::into(pos); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position)).into()), + id => match take_weak!(self.client).storage_at(&address, 
&H256::from(position), id.into()) { + Some(s) => Ok(s.into()), + None => Err(errors::state_pruned()), + } + } } - fn transaction_count(&self, params: Params) -> Result { + fn transaction_count(&self, address: RpcH160, num: Trailing) -> Result { try!(self.active()); - from_params_default_second(params) - .and_then(|(address, block_number,)| { - let address: Address = RpcH160::into(address); - match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).nonce(&*take_weak!(self.client), &address)))), - id => match take_weak!(self.client).nonce(&address, id.into()) { - Some(nonce) => Ok(to_value(&RpcU256::from(nonce))), - None => Err(errors::state_pruned()), - } - } - }) + + let address: Address = RpcH160::into(address); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).nonce(&*take_weak!(self.client), &address).into()), + id => match take_weak!(self.client).nonce(&address, id.into()) { + Some(nonce) => Ok(nonce.into()), + None => Err(errors::state_pruned()), + } + } } - fn block_transaction_count_by_hash(&self, params: Params) -> Result { + fn block_transaction_count_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| // match - take_weak!(self.client).block(BlockID::Hash(hash.into())) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count()))))) + Ok( + take_weak!(self.client).block(BlockID::Hash(hash.into())) + .map(|bytes| BlockView::new(&bytes).transactions_count().into()) + ) } - fn block_transaction_count_by_number(&self, params: Params) -> Result { + fn block_transaction_count_by_number(&self, num: BlockNumber) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber,)>(params) - .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => Ok(to_value( - &RpcU256::from(take_weak!(self.miner).status().transactions_in_pending_block) - )), - _ => 
take_weak!(self.client).block(block_number.into()) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count())))) - }) + + match num { + BlockNumber::Pending => Ok(Some( + take_weak!(self.miner).status().transactions_in_pending_block.into() + )), + _ => Ok( + take_weak!(self.client).block(num.into()) + .map(|bytes| BlockView::new(&bytes).transactions_count().into()) + ) + } } - fn block_uncles_count_by_hash(&self, params: Params) -> Result { + fn block_uncles_count_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| - take_weak!(self.client).block(BlockID::Hash(hash.into())) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count()))))) + + Ok( + take_weak!(self.client).block(BlockID::Hash(hash.into())) + .map(|bytes| BlockView::new(&bytes).uncles_count().into()) + ) } - fn block_uncles_count_by_number(&self, params: Params) -> Result { + fn block_uncles_count_by_number(&self, num: BlockNumber) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber,)>(params) - .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(0))), - _ => take_weak!(self.client).block(block_number.into()) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count())))) - }) + + match num { + BlockNumber::Pending => Ok(Some(0.into())), + _ => Ok( + take_weak!(self.client).block(num.into()) + .map(|bytes| BlockView::new(&bytes).uncles_count().into()) + ), + } } - fn code_at(&self, params: Params) -> Result { + fn code_at(&self, address: RpcH160, num: Trailing) -> Result { try!(self.active()); - from_params_default_second(params) - .and_then(|(address, block_number,)| { - let address: Address = RpcH160::into(address); - match block_number { - BlockNumber::Pending => Ok(to_value(&take_weak!(self.miner).code(&*take_weak!(self.client), 
&address).map_or_else(Bytes::default, Bytes::new))), - _ => match take_weak!(self.client).code(&address, block_number.into()) { - Some(code) => Ok(to_value(&code.map_or_else(Bytes::default, Bytes::new))), - None => Err(errors::state_pruned()), - }, - } - }) + + let address: Address = RpcH160::into(address); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).code(&*take_weak!(self.client), &address).map_or_else(Bytes::default, Bytes::new)), + _ => match take_weak!(self.client).code(&address, num.0.into()) { + Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), + None => Err(errors::state_pruned()), + }, + } } - fn block_by_hash(&self, params: Params) -> Result { + fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256, bool)>(params) - .and_then(|(hash, include_txs)| self.block(BlockID::Hash(hash.into()), include_txs)) + + self.block(BlockID::Hash(hash.into()), include_txs) } - fn block_by_number(&self, params: Params) -> Result { + fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber, bool)>(params) - .and_then(|(number, include_txs)| self.block(number.into(), include_txs)) + + self.block(num.into(), include_txs) } - fn transaction_by_hash(&self, params: Params) -> Result { + fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| { - let miner = take_weak!(self.miner); - let hash: H256 = hash.into(); - match miner.transaction(&hash) { - Some(pending_tx) => Ok(to_value(&Transaction::from(pending_tx))), - None => self.transaction(TransactionID::Hash(hash)) - } - }) + + let miner = take_weak!(self.miner); + let hash: H256 = hash.into(); + match miner.transaction(&hash) { + Some(pending_tx) => Ok(Some(pending_tx.into())), + None => self.transaction(TransactionID::Hash(hash)) + } } - fn 
transaction_by_block_hash_and_index(&self, params: Params) -> Result { + fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256, Index)>(params) - .and_then(|(hash, index)| self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value()))) + + self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value())) } - fn transaction_by_block_number_and_index(&self, params: Params) -> Result { + fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber, Index)>(params) - .and_then(|(number, index)| self.transaction(TransactionID::Location(number.into(), index.value()))) + + self.transaction(TransactionID::Location(num.into(), index.value())) } - fn transaction_receipt(&self, params: Params) -> Result { + fn transaction_receipt(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| { - let miner = take_weak!(self.miner); - let hash: H256 = hash.into(); - match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) { - (Some(receipt), true) => Ok(to_value(&Receipt::from(receipt))), - _ => { - let client = take_weak!(self.client); - let receipt = client.transaction_receipt(TransactionID::Hash(hash)); - Ok(to_value(&receipt.map(Receipt::from))) - } - } - }) + + let miner = take_weak!(self.miner); + let hash: H256 = hash.into(); + match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) { + (Some(receipt), true) => Ok(Some(receipt.into())), + _ => { + let client = take_weak!(self.client); + let receipt = client.transaction_receipt(TransactionID::Hash(hash)); + Ok(receipt.map(Into::into)) + } + } } - fn uncle_by_block_hash_and_index(&self, params: Params) -> Result { + fn uncle_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { 
try!(self.active()); - from_params::<(RpcH256, Index)>(params) - .and_then(|(hash, index)| self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() })) + + self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() }) } - fn uncle_by_block_number_and_index(&self, params: Params) -> Result { + fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber, Index)>(params) - .and_then(|(number, index)| self.uncle(UncleID { block: number.into(), position: index.value() })) + + self.uncle(UncleID { block: num.into(), position: index.value() }) } - fn compilers(&self, params: Params) -> Result { + fn compilers(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); let mut compilers = vec![]; if Command::new(SOLC).output().is_ok() { compilers.push("solidity".to_owned()) } - Ok(to_value(&compilers)) + + Ok(compilers) } - fn logs(&self, params: Params) -> Result { - try!(self.active()); - from_params::<(Filter,)>(params) - .and_then(|(filter,)| { - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = filter.into(); - let mut logs = take_weak!(self.client).logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); + fn logs(&self, filter: Filter) -> Result, Error> { + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter: EthcoreFilter = filter.into(); + let mut logs = take_weak!(self.client).logs(filter.clone()) + .into_iter() + .map(From::from) + .collect::>(); - if include_pending { - let pending = pending_logs(&*take_weak!(self.miner), &filter); - logs.extend(pending); - } + if include_pending { + let pending = pending_logs(&*take_weak!(self.miner), &filter); + logs.extend(pending); + } - Ok(to_value(&logs)) - }) + let logs = limit_logs(logs, filter.limit); + + Ok(logs) } - fn work(&self, params: Params) -> Result { + fn work(&self, 
no_new_work_timeout: Trailing) -> Result { try!(self.active()); - let (no_new_work_timeout,) = from_params::<(u64,)>(params).unwrap_or((0,)); + let no_new_work_timeout = no_new_work_timeout.0; let client = take_weak!(self.client); // check if we're still syncing and return empty strings in that case @@ -550,115 +535,118 @@ impl Eth for EthClient where if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 { Err(errors::no_new_work()) } else if self.options.send_block_number_in_get_work { - let block_number = RpcU256::from(b.block().header().number()); - Ok(to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number))) + let block_number = b.block().header().number(); + Ok(Work { + pow_hash: pow_hash.into(), + seed_hash: seed_hash.into(), + target: target.into(), + number: Some(block_number), + }) } else { - Ok(to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target)))) + Ok(Work { + pow_hash: pow_hash.into(), + seed_hash: seed_hash.into(), + target: target.into(), + number: None + }) } }).unwrap_or(Err(Error::internal_error())) // no work found. 
} - fn submit_work(&self, params: Params) -> Result { + fn submit_work(&self, nonce: RpcH64, pow_hash: RpcH256, mix_hash: RpcH256) -> Result { try!(self.active()); - from_params::<(RpcH64, RpcH256, RpcH256)>(params).and_then(|(nonce, pow_hash, mix_hash)| { - let nonce: H64 = nonce.into(); - let pow_hash: H256 = pow_hash.into(); - let mix_hash: H256 = mix_hash.into(); - trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); - let miner = take_weak!(self.miner); - let client = take_weak!(self.client); - let seal = vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()]; - let r = miner.submit_seal(&*client, pow_hash, seal); - Ok(to_value(&r.is_ok())) - }) + + let nonce: H64 = nonce.into(); + let pow_hash: H256 = pow_hash.into(); + let mix_hash: H256 = mix_hash.into(); + trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); + + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + let seal = vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()]; + Ok(miner.submit_seal(&*client, pow_hash, seal).is_ok()) } - fn submit_hashrate(&self, params: Params) -> Result { + fn submit_hashrate(&self, rate: RpcU256, id: RpcH256) -> Result { try!(self.active()); - from_params::<(RpcU256, RpcH256)>(params).and_then(|(rate, id)| { - self.external_miner.submit_hashrate(rate.into(), id.into()); - Ok(to_value(&true)) - }) + self.external_miner.submit_hashrate(rate.into(), id.into()); + Ok(true) } - fn send_raw_transaction(&self, params: Params) -> Result { + fn send_raw_transaction(&self, raw: Bytes) -> Result { try!(self.active()); - from_params::<(Bytes, )>(params) - .and_then(|(raw_transaction, )| { - let raw_transaction = raw_transaction.to_vec(); - match UntrustedRlp::new(&raw_transaction).as_val() { - Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction), 
- Err(_) => Ok(to_value(&RpcH256::from(H256::from(0)))), + + let raw_transaction = raw.to_vec(); + match UntrustedRlp::new(&raw_transaction).as_val() { + Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction), + Err(_) => Ok(RpcH256::from(H256::from(0))), + } + } + + fn call(&self, request: CallRequest, num: Trailing) -> Result { + try!(self.active()); + + let request = CallRequest::into(request); + let signed = try!(self.sign_call(request)); + + let r = match num.0 { + BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + num => take_weak!(self.client).call(&signed, num.into(), Default::default()), + }; + + Ok(r.map(|e| Bytes(e.output)).unwrap_or(Bytes::new(vec![]))) + } + + fn estimate_gas(&self, request: CallRequest, num: Trailing) -> Result { + try!(self.active()); + + let request = CallRequest::into(request); + let signed = try!(self.sign_call(request)); + let r = match num.0 { + BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + num => take_weak!(self.client).call(&signed, num.into(), Default::default()), + }; + + Ok(RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0)))) + } + + fn compile_lll(&self, _: String) -> Result { + try!(self.active()); + + rpc_unimplemented!() + } + + fn compile_serpent(&self, _: String) -> Result { + try!(self.active()); + + rpc_unimplemented!() + } + + fn compile_solidity(&self, code: String) -> Result { + try!(self.active()); + let maybe_child = Command::new(SOLC) + .arg("--bin") + .arg("--optimize") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn(); + + maybe_child + .map_err(errors::compilation) + .and_then(|mut child| { + try!(child.stdin.as_mut() + .expect("we called child.stdin(Stdio::piped()) before spawn; qed") + .write_all(code.as_bytes()) + .map_err(errors::compilation)); + let 
output = try!(child.wait_with_output().map_err(errors::compilation)); + + let s = String::from_utf8_lossy(&output.stdout); + if let Some(hex) = s.lines().skip_while(|ref l| !l.contains("Binary")).skip(1).next() { + Ok(Bytes::new(hex.from_hex().unwrap_or(vec![]))) + } else { + Err(errors::compilation("Unexpected output.")) } - }) - } - - fn call(&self, params: Params) -> Result { - try!(self.active()); - from_params_default_second(params) - .and_then(|(request, block_number,)| { - let request = CallRequest::into(request); - let signed = try!(self.sign_call(request)); - let r = match block_number { - BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), - block_number => take_weak!(self.client).call(&signed, block_number.into(), Default::default()), - }; - Ok(to_value(&r.map(|e| Bytes(e.output)).unwrap_or(Bytes::new(vec![])))) - }) - } - - fn estimate_gas(&self, params: Params) -> Result { - try!(self.active()); - from_params_default_second(params) - .and_then(|(request, block_number,)| { - let request = CallRequest::into(request); - let signed = try!(self.sign_call(request)); - let r = match block_number { - BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), - block => take_weak!(self.client).call(&signed, block.into(), Default::default()), - }; - Ok(to_value(&RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0))))) - }) - } - - fn compile_lll(&self, _: Params) -> Result { - try!(self.active()); - rpc_unimplemented!() - } - - fn compile_serpent(&self, _: Params) -> Result { - try!(self.active()); - rpc_unimplemented!() - } - - fn compile_solidity(&self, params: Params) -> Result { - try!(self.active()); - from_params::<(String, )>(params) - .and_then(|(code, )| { - let maybe_child = Command::new(SOLC) - .arg("--bin") - .arg("--optimize") - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn(); - - 
maybe_child - .map_err(errors::compilation) - .and_then(|mut child| { - try!(child.stdin.as_mut() - .expect("we called child.stdin(Stdio::piped()) before spawn; qed") - .write_all(code.as_bytes()) - .map_err(errors::compilation)); - let output = try!(child.wait_with_output().map_err(errors::compilation)); - - let s = String::from_utf8_lossy(&output.stdout); - if let Some(hex) = s.lines().skip_while(|ref l| !l.contains("Binary")).skip(1).next() { - Ok(to_value(&Bytes::new(hex.from_hex().unwrap_or(vec![])))) - } else { - Err(errors::compilation("Unexpected output.")) - } - }) }) } } diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index a4c834e99..03d9d7215 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -24,9 +24,8 @@ use ethcore::filter::Filter as EthcoreFilter; use ethcore::client::{BlockChainClient, BlockID}; use util::Mutex; use v1::traits::EthFilter; -use v1::types::{BlockNumber, Index, Filter, Log, H256 as RpcH256, U256 as RpcU256}; -use v1::helpers::{PollFilter, PollManager}; -use v1::helpers::params::expect_no_params; +use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256}; +use v1::helpers::{PollFilter, PollManager, limit_logs}; use v1::impls::eth::pending_logs; /// Eth filter rpc implementation. 
@@ -59,164 +58,154 @@ impl EthFilterClient where } } -impl EthFilter for EthFilterClient where - C: BlockChainClient + 'static, - M: MinerService + 'static { - - fn new_filter(&self, params: Params) -> Result { +impl EthFilter for EthFilterClient + where C: BlockChainClient + 'static, M: MinerService + 'static +{ + fn new_filter(&self, filter: Filter) -> Result { try!(self.active()); - from_params::<(Filter,)>(params) - .and_then(|(filter,)| { - let mut polls = self.polls.lock(); - let block_number = take_weak!(self.client).chain_info().best_block_number; - let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter)); - Ok(to_value(&RpcU256::from(id))) - }) + let mut polls = self.polls.lock(); + let block_number = take_weak!(self.client).chain_info().best_block_number; + let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter)); + Ok(id.into()) } - fn new_block_filter(&self, params: Params) -> Result { + fn new_block_filter(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let mut polls = self.polls.lock(); let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); - Ok(to_value(&RpcU256::from(id))) + Ok(id.into()) } - fn new_pending_transaction_filter(&self, params: Params) -> Result { + fn new_pending_transaction_filter(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let mut polls = self.polls.lock(); let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); - - Ok(to_value(&RpcU256::from(id))) + Ok(id.into()) } - fn filter_changes(&self, params: Params) -> Result { + fn filter_changes(&self, index: Index) -> Result { try!(self.active()); let client = take_weak!(self.client); - from_params::<(Index,)>(params) - .and_then(|(index,)| { - let mut polls = self.polls.lock(); - match polls.poll_mut(&index.value()) { - 
None => Ok(Value::Array(vec![] as Vec)), - Some(filter) => match *filter { - PollFilter::Block(ref mut block_number) => { - // + 1, cause we want to return hashes including current block hash. - let current_number = client.chain_info().best_block_number + 1; - let hashes = (*block_number..current_number).into_iter() - .map(BlockID::Number) - .filter_map(|id| client.block_hash(id)) - .map(Into::into) - .collect::>(); + let mut polls = self.polls.lock(); + match polls.poll_mut(&index.value()) { + None => Ok(FilterChanges::Empty), + Some(filter) => match *filter { + PollFilter::Block(ref mut block_number) => { + // + 1, cause we want to return hashes including current block hash. + let current_number = client.chain_info().best_block_number + 1; + let hashes = (*block_number..current_number).into_iter() + .map(BlockID::Number) + .filter_map(|id| client.block_hash(id)) + .map(Into::into) + .collect::>(); - *block_number = current_number; + *block_number = current_number; - Ok(to_value(&hashes)) - }, - PollFilter::PendingTransaction(ref mut previous_hashes) => { - // get hashes of pending transactions - let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); + Ok(FilterChanges::Hashes(hashes)) + }, + PollFilter::PendingTransaction(ref mut previous_hashes) => { + // get hashes of pending transactions + let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); - let new_hashes = - { - let previous_hashes_set = previous_hashes.iter().collect::>(); + let new_hashes = + { + let previous_hashes_set = previous_hashes.iter().collect::>(); - // find all new hashes - current_hashes - .iter() - .filter(|hash| !previous_hashes_set.contains(hash)) - .cloned() - .map(Into::into) - .collect::>() - }; + // find all new hashes + current_hashes + .iter() + .filter(|hash| !previous_hashes_set.contains(hash)) + .cloned() + .map(Into::into) + .collect::>() + }; - // save all hashes of pending transactions - *previous_hashes = current_hashes; + // save all 
hashes of pending transactions + *previous_hashes = current_hashes; - // return new hashes - Ok(to_value(&new_hashes)) - }, - PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { - // retrive the current block number - let current_number = client.chain_info().best_block_number; + // return new hashes + Ok(FilterChanges::Hashes(new_hashes)) + }, + PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { + // retrive the current block number + let current_number = client.chain_info().best_block_number; - // check if we need to check pending hashes - let include_pending = filter.to_block == Some(BlockNumber::Pending); + // check if we need to check pending hashes + let include_pending = filter.to_block == Some(BlockNumber::Pending); - // build appropriate filter - let mut filter: EthcoreFilter = filter.clone().into(); - filter.from_block = BlockID::Number(*block_number); - filter.to_block = BlockID::Latest; + // build appropriate filter + let mut filter: EthcoreFilter = filter.clone().into(); + filter.from_block = BlockID::Number(*block_number); + filter.to_block = BlockID::Latest; - // retrieve logs in range from_block..min(BlockID::Latest..to_block) - let mut logs = client.logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); + // retrieve logs in range from_block..min(BlockID::Latest..to_block) + let mut logs = client.logs(filter.clone()) + .into_iter() + .map(From::from) + .collect::>(); - // additionally retrieve pending logs - if include_pending { - let pending_logs = pending_logs(&*take_weak!(self.miner), &filter); + // additionally retrieve pending logs + if include_pending { + let pending_logs = pending_logs(&*take_weak!(self.miner), &filter); - // remove logs about which client was already notified about - let new_pending_logs: Vec<_> = pending_logs.iter() - .filter(|p| !previous_logs.contains(p)) - .cloned() - .collect(); + // remove logs about which client was already notified about + let 
new_pending_logs: Vec<_> = pending_logs.iter() + .filter(|p| !previous_logs.contains(p)) + .cloned() + .collect(); - // save all logs retrieved by client - *previous_logs = pending_logs.into_iter().collect(); + // save all logs retrieved by client + *previous_logs = pending_logs.into_iter().collect(); - // append logs array with new pending logs - logs.extend(new_pending_logs); - } - - // save the number of the next block as a first block from which - // we want to get logs - *block_number = current_number + 1; - - Ok(to_value(&logs)) - } + // append logs array with new pending logs + logs.extend(new_pending_logs); } + + let logs = limit_logs(logs, filter.limit); + + // save the number of the next block as a first block from which + // we want to get logs + *block_number = current_number + 1; + + Ok(FilterChanges::Logs(logs)) } - }) + } + } } - fn filter_logs(&self, params: Params) -> Result { + fn filter_logs(&self, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(Index,)>(params) - .and_then(|(index,)| { - let mut polls = self.polls.lock(); - match polls.poll(&index.value()) { - Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = filter.clone().into(); - let mut logs = take_weak!(self.client).logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); - if include_pending { - logs.extend(pending_logs(&*take_weak!(self.miner), &filter)); - } + let mut polls = self.polls.lock(); + match polls.poll(&index.value()) { + Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter: EthcoreFilter = filter.clone().into(); + let mut logs = take_weak!(self.client).logs(filter.clone()) + .into_iter() + .map(From::from) + .collect::>(); - Ok(to_value(&logs)) - }, - // just empty array - _ => Ok(Value::Array(vec![] 
as Vec)), + if include_pending { + logs.extend(pending_logs(&*take_weak!(self.miner), &filter)); } - }) + + let logs = limit_logs(logs, filter.limit); + + Ok(logs) + }, + // just empty array + _ => Ok(Vec::new()), + } } - fn uninstall_filter(&self, params: Params) -> Result { + fn uninstall_filter(&self, index: Index) -> Result { try!(self.active()); - from_params::<(Index,)>(params) - .map(|(index,)| { - self.polls.lock().remove_poll(&index.value()); - to_value(&true) - }) + + self.polls.lock().remove_poll(&index.value()); + Ok(true) } } diff --git a/rpc/src/v1/impls/eth_signing.rs b/rpc/src/v1/impls/eth_signing.rs index c19b5819d..9290a9425 100644 --- a/rpc/src/v1/impls/eth_signing.rs +++ b/rpc/src/v1/impls/eth_signing.rs @@ -23,10 +23,10 @@ use ethcore::client::MiningBlockChainClient; use util::{U256, Address, H256, Mutex}; use transient_hashmap::TransientHashMap; use ethcore::account_provider::AccountProvider; -use v1::helpers::{errors, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationsQueue, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest}; +use v1::helpers::{errors, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest, SignerService}; use v1::helpers::dispatch::{default_gas_price, sign_and_dispatch}; use v1::traits::EthSigning; -use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256}; +use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256, Bytes as RpcBytes}; fn fill_optional_fields(request: TRequest, client: &C, miner: &M) -> FilledRequest where C: MiningBlockChainClient, M: MinerService { @@ -43,7 +43,7 @@ fn fill_optional_fields(request: TRequest, client: &C, miner: &M) -> Fille /// Implementation of functions that require signing when no trusted signer is used. 
pub struct EthSigningQueueClient where C: MiningBlockChainClient, M: MinerService { - queue: Weak, + signer: Weak, accounts: Weak, client: Weak, miner: Weak, @@ -60,9 +60,9 @@ pub enum DispatchResult { impl EthSigningQueueClient where C: MiningBlockChainClient, M: MinerService { /// Creates a new signing queue client given shared signing queue. - pub fn new(queue: &Arc, client: &Arc, miner: &Arc, accounts: &Arc) -> Self { + pub fn new(signer: &Arc, client: &Arc, miner: &Arc, accounts: &Arc) -> Self { EthSigningQueueClient { - queue: Arc::downgrade(queue), + signer: Arc::downgrade(signer), accounts: Arc::downgrade(accounts), client: Arc::downgrade(client), miner: Arc::downgrade(miner), @@ -86,8 +86,8 @@ impl EthSigningQueueClient where C: MiningBlockChainClient, M: Miner return Ok(DispatchResult::Value(to_value(&accounts.sign(address, msg).ok().map_or_else(RpcH520::default, Into::into)))) } - let queue = take_weak!(self.queue); - queue.add_request(ConfirmationPayload::Sign(address, msg)) + let signer = take_weak!(self.signer); + signer.add_request(ConfirmationPayload::Sign(address, msg)) .map(DispatchResult::Promise) .map_err(|_| errors::request_rejected_limit()) }) @@ -105,9 +105,9 @@ impl EthSigningQueueClient where C: MiningBlockChainClient, M: Miner return sign_and_dispatch(&*client, &*miner, request, &*accounts, sender).map(DispatchResult::Value); } - let queue = take_weak!(self.queue); + let signer = take_weak!(self.signer); let request = fill_optional_fields(request, &*client, &*miner); - queue.add_request(ConfirmationPayload::Transaction(request)) + signer.add_request(ConfirmationPayload::Transaction(request)) .map(DispatchResult::Promise) .map_err(|_| errors::request_rejected_limit()) }) @@ -168,6 +168,13 @@ impl EthSigning for EthSigningQueueClient }) } + fn decrypt_message(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(RpcH160, RpcBytes)>(params).and_then(|(_account, _ciphertext)| { + Err(errors::unimplemented()) + }) + } + fn 
check_request(&self, params: Params) -> Result { try!(self.active()); let mut pending = self.pending.lock(); @@ -241,6 +248,14 @@ impl EthSigning for EthSigningUnsafeClient where })) } + fn decrypt_message(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(RpcH160, RpcBytes)>(params).and_then(|(address, ciphertext)| { + let s = try!(take_weak!(self.accounts).decrypt(address.into(), &[0; 0], &ciphertext.0).map_err(|_| Error::internal_error())); + Ok(to_value(RpcBytes::from(s))) + }) + } + fn post_sign(&self, _: Params) -> Result { // We don't support this in non-signer mode. Err(errors::signer_disabled()) diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs index ee352e65a..220ead3dd 100644 --- a/rpc/src/v1/impls/ethcore.rs +++ b/rpc/src/v1/impls/ethcore.rs @@ -21,6 +21,7 @@ use std::collections::{BTreeMap}; use util::{RotatingLogger, Address}; use util::misc::version_data; +use crypto::ecies; use ethkey::{Brain, Generator}; use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; @@ -29,8 +30,8 @@ use ethcore::client::{MiningBlockChainClient}; use jsonrpc_core::*; use v1::traits::Ethcore; -use v1::types::{Bytes, U256, H160, Peers}; -use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, NetworkSettings}; +use v1::types::{Bytes, U256, H160, H512, Peers, Transaction}; +use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::params::expect_no_params; /// Ethcore implementation. 
@@ -45,7 +46,7 @@ pub struct EthcoreClient where net: Weak, logger: Arc, settings: Arc, - confirmations_queue: Option>, + signer: Option>, } impl EthcoreClient where C: MiningBlockChainClient, M: MinerService, S: SyncProvider { @@ -57,7 +58,7 @@ impl EthcoreClient where C: MiningBlockChainClient, M: net: &Arc, logger: Arc, settings: Arc, - queue: Option> + signer: Option> ) -> Self { EthcoreClient { client: Arc::downgrade(client), @@ -66,7 +67,7 @@ impl EthcoreClient where C: MiningBlockChainClient, M: net: Arc::downgrade(net), logger: logger, settings: settings, - confirmations_queue: queue, + signer: signer, } } @@ -198,9 +199,9 @@ impl Ethcore for EthcoreClient where M: MinerService + try!(self.active()); try!(expect_no_params(params)); - match self.confirmations_queue { + match self.signer { None => Err(errors::signer_disabled()), - Some(ref queue) => Ok(to_value(&queue.len())), + Some(ref signer) => Ok(to_value(&signer.len())), } } @@ -217,4 +218,19 @@ impl Ethcore for EthcoreClient where M: MinerService + to_value(&H160::from(Brain::new(phrase).generate().unwrap().address())) ) } + + fn encrypt_message(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| { + let s = try!(ecies::encrypt(&key.into(), &[0; 0], &phrase.0).map_err(|_| Error::internal_error())); + Ok(to_value(&Bytes::from(s))) + }) + } + + fn pending_transactions(&self, params: Params) -> Result { + try!(self.active()); + try!(expect_no_params(params)); + + Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::>())) + } } diff --git a/rpc/src/v1/impls/personal_signer.rs b/rpc/src/v1/impls/personal_signer.rs index 5cfda9a65..441ed679b 100644 --- a/rpc/src/v1/impls/personal_signer.rs +++ b/rpc/src/v1/impls/personal_signer.rs @@ -23,13 +23,13 @@ use ethcore::client::MiningBlockChainClient; use ethcore::miner::MinerService; use v1::traits::PersonalSigner; use v1::types::{TransactionModification, 
ConfirmationRequest, U256}; -use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, ConfirmationPayload}; +use v1::helpers::{errors, SignerService, SigningQueue, ConfirmationPayload}; use v1::helpers::params::expect_no_params; use v1::helpers::dispatch::{unlock_sign_and_dispatch, signature_with_password}; /// Transactions confirmation (personal) rpc implementation. pub struct SignerClient where C: MiningBlockChainClient, M: MinerService { - queue: Weak, + signer: Weak, accounts: Weak, client: Weak, miner: Weak, @@ -38,9 +38,14 @@ pub struct SignerClient where C: MiningBlockChainClient, M: MinerService { impl SignerClient where C: MiningBlockChainClient, M: MinerService { /// Create new instance of signer client. - pub fn new(store: &Arc, client: &Arc, miner: &Arc, queue: &Arc) -> Self { + pub fn new( + store: &Arc, + client: &Arc, + miner: &Arc, + signer: &Arc, + ) -> Self { SignerClient { - queue: Arc::downgrade(queue), + signer: Arc::downgrade(signer), accounts: Arc::downgrade(store), client: Arc::downgrade(client), miner: Arc::downgrade(miner), @@ -59,8 +64,8 @@ impl PersonalSigner for SignerClient where C: Mini fn requests_to_confirm(&self, params: Params) -> Result { try!(self.active()); try!(expect_no_params(params)); - let queue = take_weak!(self.queue); - Ok(to_value(&queue.requests().into_iter().map(From::from).collect::>())) + let signer = take_weak!(self.signer); + Ok(to_value(&signer.requests().into_iter().map(From::from).collect::>())) } fn confirm_request(&self, params: Params) -> Result { @@ -71,11 +76,11 @@ impl PersonalSigner for SignerClient where C: Mini |(id, modification, pass)| { let id = id.into(); let accounts = take_weak!(self.accounts); - let queue = take_weak!(self.queue); + let signer = take_weak!(self.signer); let client = take_weak!(self.client); let miner = take_weak!(self.miner); - queue.peek(&id).map(|confirmation| { + signer.peek(&id).map(|confirmation| { let result = match confirmation.payload { 
ConfirmationPayload::Transaction(mut request) => { // apply modification @@ -90,7 +95,7 @@ impl PersonalSigner for SignerClient where C: Mini } }; if let Ok(ref response) = result { - queue.request_confirmed(id, Ok(response.clone())); + signer.request_confirmed(id, Ok(response.clone())); } result }).unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id))) @@ -102,11 +107,20 @@ impl PersonalSigner for SignerClient where C: Mini try!(self.active()); from_params::<(U256, )>(params).and_then( |(id, )| { - let queue = take_weak!(self.queue); - let res = queue.request_rejected(id.into()); + let signer = take_weak!(self.signer); + let res = signer.request_rejected(id.into()); Ok(to_value(&res.is_some())) } ) } + + fn generate_token(&self, params: Params) -> Result { + try!(self.active()); + try!(expect_no_params(params)); + let signer = take_weak!(self.signer); + signer.generate_token() + .map(|token| to_value(&token)) + .map_err(|e| errors::token(e)) + } } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 897fcf623..889b7840b 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -28,4 +28,4 @@ pub mod types; pub use self::traits::{Web3, Eth, EthFilter, EthSigning, Personal, PersonalSigner, Net, Ethcore, EthcoreSet, Traces, Rpc}; pub use self::impls::*; -pub use self::helpers::{SigningQueue, ConfirmationsQueue, NetworkSettings}; +pub use self::helpers::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings}; diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index e7888a8c5..eb3fbaf6e 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -27,7 +27,7 @@ use ethcore::receipt::LocalizedReceipt; use ethcore::transaction::{Transaction, Action}; use ethcore::miner::{ExternalMiner, MinerService}; use ethsync::SyncState; -use v1::{Eth, EthClient, EthClientOptions, EthSigning, EthSigningUnsafeClient}; +use v1::{Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, EthSigning, 
EthSigningUnsafeClient}; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; use rustc_serialize::hex::ToHex; use time::get_time; @@ -76,10 +76,12 @@ impl EthTester { let hashrates = Arc::new(Mutex::new(HashMap::new())); let external_miner = Arc::new(ExternalMiner::new(hashrates.clone())); let eth = EthClient::new(&client, &sync, &ap, &miner, &external_miner, options).to_delegate(); + let filter = EthFilterClient::new(&client, &miner).to_delegate(); let sign = EthSigningUnsafeClient::new(&client, &ap, &miner).to_delegate(); let io = IoHandler::new(); io.add_delegate(eth); io.add_delegate(sign); + io.add_delegate(filter); EthTester { client: client, @@ -149,6 +151,93 @@ fn rpc_eth_hashrate() { assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); } +#[test] +fn rpc_eth_logs() { + let tester = EthTester::default(); + tester.client.set_logs(vec![LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }, LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }]); + + + let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#; + let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1}], "id": 1}"#; + let request3 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":0}], "id": 1}"#; + + let response1 = 
r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + let response3 = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request1), Some(response1.to_owned())); + assert_eq!(tester.io.handle_request_sync(request2), Some(response2.to_owned())); + assert_eq!(tester.io.handle_request_sync(request3), Some(response3.to_owned())); +} + +#[test] +fn rpc_logs_filter() { + let tester = EthTester::default(); + // Set some logs + tester.client.set_logs(vec![LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }, LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: 
H256::default(), + log_index: 0, + }]); + + // Register filters first + let request_default = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{}], "id": 1}"#; + let request_limit = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{"limit":1}], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request_default), Some(response1.to_owned())); + assert_eq!(tester.io.handle_request_sync(request_limit), Some(response2.to_owned())); + + let request_changes1 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#; + let request_changes2 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x1"], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request_changes1), 
Some(response1.to_owned())); + assert_eq!(tester.io.handle_request_sync(request_changes2), Some(response2.to_owned())); +} + #[test] fn rpc_eth_submit_hashrate() { let tester = EthTester::default(); @@ -368,7 +457,7 @@ fn rpc_eth_pending_transaction_by_hash() { tester.miner.pending_transactions.lock().insert(H256::zero(), tx); } - let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0xa"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0xa"},"id":1}"#; let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionByHash", diff --git a/rpc/src/v1/tests/mocked/eth_signing.rs b/rpc/src/v1/tests/mocked/eth_signing.rs index f06d4027a..1bf901e5f 100644 --- a/rpc/src/v1/tests/mocked/eth_signing.rs +++ b/rpc/src/v1/tests/mocked/eth_signing.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use 
jsonrpc_core::{IoHandler, to_value}; use v1::impls::EthSigningQueueClient; use v1::traits::EthSigning; -use v1::helpers::{ConfirmationsQueue, SigningQueue}; +use v1::helpers::{SignerService, SigningQueue}; use v1::types::{H256 as RpcH256, H520 as RpcH520}; use v1::tests::helpers::TestMinerService; use util::{Address, FixedHash, Uint, U256, H256, H520}; @@ -28,7 +28,7 @@ use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Transaction, Action}; struct EthSigningTester { - pub queue: Arc, + pub signer: Arc, pub client: Arc, pub miner: Arc, pub accounts: Arc, @@ -37,15 +37,15 @@ struct EthSigningTester { impl Default for EthSigningTester { fn default() -> Self { - let queue = Arc::new(ConfirmationsQueue::default()); + let signer = Arc::new(SignerService::new_test()); let client = Arc::new(TestBlockChainClient::default()); let miner = Arc::new(TestMinerService::default()); let accounts = Arc::new(AccountProvider::transient_provider()); let io = IoHandler::new(); - io.add_delegate(EthSigningQueueClient::new(&queue, &client, &miner, &accounts).to_delegate()); + io.add_delegate(EthSigningQueueClient::new(&signer, &client, &miner, &accounts).to_delegate()); EthSigningTester { - queue: queue, + signer: signer, client: client, miner: miner, accounts: accounts, @@ -63,7 +63,7 @@ fn should_add_sign_to_queue() { // given let tester = eth_signing(); let address = Address::random(); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); // when let request = r#"{ @@ -79,9 +79,9 @@ fn should_add_sign_to_queue() { // then let async_result = tester.io.handle_request(&request).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // respond - tester.queue.request_confirmed(U256::from(1), Ok(to_value(&RpcH520::from(H520::default())))); + tester.signer.request_confirmed(U256::from(1), Ok(to_value(&RpcH520::from(H520::default())))); assert!(async_result.on_result(move |res| { 
assert_eq!(res, response.to_owned()); })); @@ -92,7 +92,7 @@ fn should_post_sign_to_queue() { // given let tester = eth_signing(); let address = Address::random(); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); // when let request = r#"{ @@ -108,7 +108,7 @@ fn should_post_sign_to_queue() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); } #[test] @@ -155,7 +155,7 @@ fn should_check_status_of_request_when_its_resolved() { "id": 1 }"#; tester.io.handle_request_sync(&request).expect("Sent"); - tester.queue.request_confirmed(U256::from(1), Ok(to_value(&"Hello World!"))); + tester.signer.request_confirmed(U256::from(1), Ok(to_value(&"Hello World!"))); // when let request = r#"{ @@ -192,7 +192,7 @@ fn should_sign_if_account_is_unlocked() { }"#; let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{}", signature).as_ref() + r#"","id":1}"#; assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); } #[test] @@ -200,7 +200,7 @@ fn should_add_transaction_to_queue() { // given let tester = eth_signing(); let address = Address::random(); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); // when let request = r#"{ @@ -219,9 +219,9 @@ fn should_add_transaction_to_queue() { // then let async_result = tester.io.handle_request(&request).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // respond - tester.queue.request_confirmed(U256::from(1), Ok(to_value(&RpcH256::from(H256::default())))); + tester.signer.request_confirmed(U256::from(1), Ok(to_value(&RpcH256::from(H256::default())))); assert!(async_result.on_result(move |res| { assert_eq!(res, 
response.to_owned()); })); diff --git a/rpc/src/v1/tests/mocked/ethcore.rs b/rpc/src/v1/tests/mocked/ethcore.rs index d8121b6d6..811ccced4 100644 --- a/rpc/src/v1/tests/mocked/ethcore.rs +++ b/rpc/src/v1/tests/mocked/ethcore.rs @@ -22,7 +22,7 @@ use ethcore::client::{TestBlockChainClient}; use jsonrpc_core::IoHandler; use v1::{Ethcore, EthcoreClient}; -use v1::helpers::{ConfirmationsQueue, NetworkSettings}; +use v1::helpers::{SignerService, NetworkSettings}; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; use super::manage_network::TestManageNetwork; @@ -262,8 +262,8 @@ fn rpc_ethcore_unsigned_transactions_count() { let sync = sync_provider(); let net = network_service(); let io = IoHandler::new(); - let queue = Arc::new(ConfirmationsQueue::default()); - let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(queue)).to_delegate(); + let signer = Arc::new(SignerService::new_test()); + let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(signer)).to_delegate(); io.add_delegate(ethcore); let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#; @@ -286,3 +286,18 @@ fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() { assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } + +#[test] +fn rpc_ethcore_pending_transactions() { + let miner = miner_service(); + let client = client_service(); + let sync = sync_provider(); + let net = network_service(); + let io = IoHandler::new(); + io.add_delegate(ethcore_client(&client, &miner, &sync, &net).to_delegate()); + + let request = r#"{"jsonrpc": "2.0", "method": "ethcore_pendingTransactions", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/mocked/personal_signer.rs b/rpc/src/v1/tests/mocked/personal_signer.rs 
index 976b232cc..04ae829ee 100644 --- a/rpc/src/v1/tests/mocked/personal_signer.rs +++ b/rpc/src/v1/tests/mocked/personal_signer.rs @@ -23,10 +23,10 @@ use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Transaction, Action}; use v1::{SignerClient, PersonalSigner}; use v1::tests::helpers::TestMinerService; -use v1::helpers::{SigningQueue, ConfirmationsQueue, FilledTransactionRequest, ConfirmationPayload}; +use v1::helpers::{SigningQueue, SignerService, FilledTransactionRequest, ConfirmationPayload}; struct PersonalSignerTester { - queue: Arc, + signer: Arc, accounts: Arc, io: IoHandler, miner: Arc, @@ -49,16 +49,16 @@ fn miner_service() -> Arc { } fn signer_tester() -> PersonalSignerTester { - let queue = Arc::new(ConfirmationsQueue::default()); + let signer = Arc::new(SignerService::new_test()); let accounts = accounts_provider(); let client = blockchain_client(); let miner = miner_service(); let io = IoHandler::new(); - io.add_delegate(SignerClient::new(&accounts, &client, &miner, &queue).to_delegate()); + io.add_delegate(SignerClient::new(&accounts, &client, &miner, &signer).to_delegate()); PersonalSignerTester { - queue: queue, + signer: signer, accounts: accounts, io: io, miner: miner, @@ -71,7 +71,7 @@ fn signer_tester() -> PersonalSignerTester { fn should_return_list_of_items_to_confirm() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: Address::from(1), to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: U256::from(10_000), @@ -80,7 +80,7 @@ fn should_return_list_of_items_to_confirm() { data: vec![], nonce: None, })).unwrap(); - tester.queue.add_request(ConfirmationPayload::Sign(1.into(), 5.into())).unwrap(); + tester.signer.add_request(ConfirmationPayload::Sign(1.into(), 5.into())).unwrap(); // when let request = 
r#"{"jsonrpc":"2.0","method":"personal_requestsToConfirm","params":[],"id":1}"#; @@ -100,7 +100,7 @@ fn should_return_list_of_items_to_confirm() { fn should_reject_transaction_from_queue_without_dispatching() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: Address::from(1), to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: U256::from(10_000), @@ -109,7 +109,7 @@ fn should_reject_transaction_from_queue_without_dispatching() { data: vec![], nonce: None, })).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{"jsonrpc":"2.0","method":"personal_rejectRequest","params":["0x1"],"id":1}"#; @@ -117,7 +117,7 @@ fn should_reject_transaction_from_queue_without_dispatching() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); assert_eq!(tester.miner.imported_transactions.lock().len(), 0); } @@ -125,7 +125,7 @@ fn should_reject_transaction_from_queue_without_dispatching() { fn should_not_remove_transaction_if_password_is_invalid() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: Address::from(1), to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: U256::from(10_000), @@ -134,7 +134,7 @@ fn should_not_remove_transaction_if_password_is_invalid() { data: vec![], nonce: None, })).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // when let request = 
r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; @@ -142,15 +142,15 @@ fn should_not_remove_transaction_if_password_is_invalid() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); } #[test] fn should_not_remove_sign_if_password_is_invalid() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Sign(0.into(), 5.into())).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + tester.signer.add_request(ConfirmationPayload::Sign(0.into(), 5.into())).unwrap(); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; @@ -158,7 +158,7 @@ fn should_not_remove_sign_if_password_is_invalid() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); } #[test] @@ -167,7 +167,7 @@ fn should_confirm_transaction_and_dispatch() { let tester = signer_tester(); let address = tester.accounts.new_account("test").unwrap(); let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: address, to: Some(recipient), gas_price: U256::from(10_000), @@ -189,7 +189,7 @@ fn should_confirm_transaction_and_dispatch() { let signature = tester.accounts.sign(address, t.hash()).unwrap(); let t = t.with_signature(signature); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{ @@ -202,7 +202,24 @@ fn should_confirm_transaction_and_dispatch() { // then 
assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } +#[test] +fn should_generate_new_token() { + // given + let tester = signer_tester(); + + // when + let request = r#"{ + "jsonrpc":"2.0", + "method":"personal_generateAuthorizationToken", + "params":[], + "id":1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"new_token","id":1}"#; + + // then + assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 610591f1f..80789fd0e 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -18,186 +18,185 @@ use std::sync::Arc; use jsonrpc_core::*; -/// Eth rpc interface. -pub trait Eth: Sized + Send + Sync + 'static { - /// Returns protocol version. - fn protocol_version(&self, _: Params) -> Result; +use v1::types::{Block, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index}; +use v1::types::{Log, Receipt, SyncStatus, Transaction, Work}; +use v1::types::{H64, H160, H256, U256}; - /// Returns an object with data about the sync status or false. (wtf?) - fn syncing(&self, _: Params) -> Result; +use v1::helpers::auto_args::{Trailing, Wrap}; - /// Returns the number of hashes per second that the node is mining with. - fn hashrate(&self, _: Params) -> Result; +build_rpc_trait! { + /// Eth rpc interface. + pub trait Eth { + /// Returns protocol version encoded as a string (quotes are necessary). + #[name("eth_protocolVersion")] + fn protocol_version(&self) -> Result; - /// Returns block author. - fn author(&self, _: Params) -> Result; + /// Returns an object with data about the sync status or false. (wtf?) + #[name("eth_syncing")] + fn syncing(&self) -> Result; - /// Returns true if client is actively mining new blocks. 
- fn is_mining(&self, _: Params) -> Result; + /// Returns the number of hashes per second that the node is mining with. + #[name("eth_hashrate")] + fn hashrate(&self) -> Result; - /// Returns current gas_price. - fn gas_price(&self, _: Params) -> Result; + /// Returns block author. + #[name("eth_coinbase")] + fn author(&self) -> Result; - /// Returns accounts list. - fn accounts(&self, _: Params) -> Result; + /// Returns true if client is actively mining new blocks. + #[name("eth_mining")] + fn is_mining(&self) -> Result; - /// Returns highest block number. - fn block_number(&self, _: Params) -> Result; + /// Returns current gas_price. + #[name("eth_gasPrice")] + fn gas_price(&self) -> Result; - /// Returns balance of the given account. - fn balance(&self, _: Params) -> Result; + /// Returns accounts list. + #[name("eth_accounts")] + fn accounts(&self) -> Result, Error>; - /// Returns content of the storage at given address. - fn storage_at(&self, _: Params) -> Result; + /// Returns highest block number. + #[name("eth_blockNumber")] + fn block_number(&self) -> Result; - /// Returns block with given hash. - fn block_by_hash(&self, _: Params) -> Result; + /// Returns balance of the given account. + #[name("eth_getBalance")] + fn balance(&self, H160, Trailing) -> Result; - /// Returns block with given number. - fn block_by_number(&self, _: Params) -> Result; + /// Returns content of the storage at given address. + #[name("eth_getStorageAt")] + fn storage_at(&self, H160, U256, Trailing) -> Result; - /// Returns the number of transactions sent from given address at given time (block number). - fn transaction_count(&self, _: Params) -> Result; + /// Returns block with given hash. + #[name("eth_getBlockByHash")] + fn block_by_hash(&self, H256, bool) -> Result, Error>; - /// Returns the number of transactions in a block with given hash. - fn block_transaction_count_by_hash(&self, _: Params) -> Result; + /// Returns block with given number. 
+ #[name("eth_getBlockByNumber")] + fn block_by_number(&self, BlockNumber, bool) -> Result, Error>; - /// Returns the number of transactions in a block with given block number. - fn block_transaction_count_by_number(&self, _: Params) -> Result; + /// Returns the number of transactions sent from given address at given time (block number). + #[name("eth_getTransactionCount")] + fn transaction_count(&self, H160, Trailing) -> Result; - /// Returns the number of uncles in a block with given hash. - fn block_uncles_count_by_hash(&self, _: Params) -> Result; + /// Returns the number of transactions in a block with given hash. + #[name("eth_getBlockTransactionCountByHash")] + fn block_transaction_count_by_hash(&self, H256) -> Result, Error>; - /// Returns the number of uncles in a block with given block number. - fn block_uncles_count_by_number(&self, _: Params) -> Result; + /// Returns the number of transactions in a block with given block number. + #[name("eth_getBlockTransactionCountByNumber")] + fn block_transaction_count_by_number(&self, BlockNumber) -> Result, Error>; - /// Returns the code at given address at given time (block number). - fn code_at(&self, _: Params) -> Result; + /// Returns the number of uncles in a block with given hash. + #[name("eth_getUncleCountByBlockHash")] + fn block_uncles_count_by_hash(&self, H256) -> Result, Error>; - /// Sends signed transaction. - fn send_raw_transaction(&self, _: Params) -> Result; + /// Returns the number of uncles in a block with given block number. + #[name("eth_getUncleCountByBlockNumber")] + fn block_uncles_count_by_number(&self, BlockNumber) -> Result, Error>; - /// Call contract. - fn call(&self, _: Params) -> Result; + /// Returns the code at given address at given time (block number). + #[name("eth_getCode")] + fn code_at(&self, H160, Trailing) -> Result; - /// Estimate gas needed for execution of given contract. - fn estimate_gas(&self, _: Params) -> Result; + /// Sends signed transaction, returning its hash. 
+ #[name("eth_sendRawTransaction")] + fn send_raw_transaction(&self, Bytes) -> Result; - /// Get transaction by its hash. - fn transaction_by_hash(&self, _: Params) -> Result; + /// Call contract, returning the output data. + #[name("eth_call")] + fn call(&self, CallRequest, Trailing) -> Result; - /// Returns transaction at given block hash and index. - fn transaction_by_block_hash_and_index(&self, _: Params) -> Result; + /// Estimate gas needed for execution of given contract. + #[name("eth_estimateGas")] + fn estimate_gas(&self, CallRequest, Trailing) -> Result; - /// Returns transaction by given block number and index. - fn transaction_by_block_number_and_index(&self, _: Params) -> Result; + /// Get transaction by its hash. + #[name("eth_getTransactionByHash")] + fn transaction_by_hash(&self, H256) -> Result, Error>; - /// Returns transaction receipt. - fn transaction_receipt(&self, _: Params) -> Result; + /// Returns transaction at given block hash and index. + #[name("eth_getTransactionByBlockHashAndIndex")] + fn transaction_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; - /// Returns an uncles at given block and index. - fn uncle_by_block_hash_and_index(&self, _: Params) -> Result; + /// Returns transaction by given block number and index. + #[name("eth_getTransactionByBlockNumberAndIndex")] + fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; - /// Returns an uncles at given block and index. - fn uncle_by_block_number_and_index(&self, _: Params) -> Result; + /// Returns transaction receipt. + #[name("eth_getTransactionReceipt")] + fn transaction_receipt(&self, H256) -> Result, Error>; - /// Returns available compilers. - fn compilers(&self, _: Params) -> Result; + /// Returns an uncles at given block and index. + #[name("eth_getUncleByBlockHashAndIndex")] + fn uncle_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; - /// Compiles lll code. 
- fn compile_lll(&self, _: Params) -> Result; + /// Returns an uncles at given block and index. + #[name("eth_getUncleByBlockNumberAndIndex")] + fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; - /// Compiles solidity. - fn compile_solidity(&self, _: Params) -> Result; + /// Returns available compilers. + #[name("eth_getCompilers")] + fn compilers(&self) -> Result, Error>; - /// Compiles serpent. - fn compile_serpent(&self, _: Params) -> Result; + /// Compiles lll code. + #[name("eth_compileLLL")] + fn compile_lll(&self, String) -> Result; - /// Returns logs matching given filter object. - fn logs(&self, _: Params) -> Result; + /// Compiles solidity. + #[name("eth_compileSolidity")] + fn compile_solidity(&self, String) -> Result; - /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. - fn work(&self, _: Params) -> Result; + /// Compiles serpent. + #[name("eth_compileSerpent")] + fn compile_serpent(&self, String) -> Result; - /// Used for submitting a proof-of-work solution. - fn submit_work(&self, _: Params) -> Result; + /// Returns logs matching given filter object. + #[name("eth_getLogs")] + fn logs(&self, Filter) -> Result, Error>; - /// Used for submitting mining hashrate. - fn submit_hashrate(&self, _: Params) -> Result; + /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. + #[name("eth_getWork")] + fn work(&self, Trailing) -> Result; - /// Should be used to convert object to io delegate. 
- fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("eth_protocolVersion", Eth::protocol_version); - delegate.add_method("eth_syncing", Eth::syncing); - delegate.add_method("eth_hashrate", Eth::hashrate); - delegate.add_method("eth_coinbase", Eth::author); - delegate.add_method("eth_mining", Eth::is_mining); - delegate.add_method("eth_gasPrice", Eth::gas_price); - delegate.add_method("eth_accounts", Eth::accounts); - delegate.add_method("eth_blockNumber", Eth::block_number); - delegate.add_method("eth_getBalance", Eth::balance); - delegate.add_method("eth_getStorageAt", Eth::storage_at); - delegate.add_method("eth_getTransactionCount", Eth::transaction_count); - delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); - delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); - delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); - delegate.add_method("eth_getCode", Eth::code_at); - delegate.add_method("eth_sendRawTransaction", Eth::send_raw_transaction); - delegate.add_method("eth_call", Eth::call); - delegate.add_method("eth_estimateGas", Eth::estimate_gas); - delegate.add_method("eth_getBlockByHash", Eth::block_by_hash); - delegate.add_method("eth_getBlockByNumber", Eth::block_by_number); - delegate.add_method("eth_getTransactionByHash", Eth::transaction_by_hash); - delegate.add_method("eth_getTransactionByBlockHashAndIndex", Eth::transaction_by_block_hash_and_index); - delegate.add_method("eth_getTransactionByBlockNumberAndIndex", Eth::transaction_by_block_number_and_index); - delegate.add_method("eth_getTransactionReceipt", Eth::transaction_receipt); - delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_by_block_hash_and_index); - 
delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_by_block_number_and_index); - delegate.add_method("eth_getCompilers", Eth::compilers); - delegate.add_method("eth_compileLLL", Eth::compile_lll); - delegate.add_method("eth_compileSolidity", Eth::compile_solidity); - delegate.add_method("eth_compileSerpent", Eth::compile_serpent); - delegate.add_method("eth_getLogs", Eth::logs); - delegate.add_method("eth_getWork", Eth::work); - delegate.add_method("eth_submitWork", Eth::submit_work); - delegate.add_method("eth_submitHashrate", Eth::submit_hashrate); - delegate + /// Used for submitting a proof-of-work solution. + #[name("eth_submitWork")] + fn submit_work(&self, H64, H256, H256) -> Result; + + /// Used for submitting mining hashrate. + #[name("eth_submitHashrate")] + fn submit_hashrate(&self, U256, H256) -> Result; } } -/// Eth filters rpc api (polling). -// TODO: do filters api properly -pub trait EthFilter: Sized + Send + Sync + 'static { - /// Returns id of new filter. - fn new_filter(&self, _: Params) -> Result; +build_rpc_trait! { - /// Returns id of new block filter. - fn new_block_filter(&self, _: Params) -> Result; + /// Eth filters rpc api (polling). + // TODO: do filters api properly + pub trait EthFilter { + /// Returns id of new filter. + #[name("eth_newFilter")] + fn new_filter(&self, Filter) -> Result; - /// Returns id of new block filter. - fn new_pending_transaction_filter(&self, _: Params) -> Result; + /// Returns id of new block filter. + #[name("eth_newBlockFilter")] + fn new_block_filter(&self) -> Result; - /// Returns filter changes since last poll. - fn filter_changes(&self, _: Params) -> Result; + /// Returns id of new block filter. + #[name("eth_newPendingTransactionFilter")] + fn new_pending_transaction_filter(&self) -> Result; - /// Returns all logs matching given filter (in a range 'from' - 'to'). - fn filter_logs(&self, _: Params) -> Result; + /// Returns filter changes since last poll. 
+ #[name("eth_getFilterChanges")] + fn filter_changes(&self, Index) -> Result; - /// Uninstalls filter. - fn uninstall_filter(&self, _: Params) -> Result; + /// Returns all logs matching given filter (in a range 'from' - 'to'). + #[name("eth_getFilterLogs")] + fn filter_logs(&self, Index) -> Result, Error>; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("eth_newFilter", EthFilter::new_filter); - delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter); - delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter); - delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes); - delegate.add_method("eth_getFilterLogs", EthFilter::filter_logs); - delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter); - delegate + /// Uninstalls filter. + #[name("eth_uninstallFilter")] + fn uninstall_filter(&self, Index) -> Result; } } @@ -227,6 +226,10 @@ pub trait EthSigning: Sized + Send + Sync + 'static { /// or an error. fn check_request(&self, _: Params) -> Result; + /// Decrypt some ECIES-encrypted message. + /// First parameter is the address with which it is encrypted, second is the ciphertext. + fn decrypt_message(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. 
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); @@ -235,6 +238,7 @@ pub trait EthSigning: Sized + Send + Sync + 'static { delegate.add_method("eth_postSign", EthSigning::post_sign); delegate.add_method("eth_postTransaction", EthSigning::post_transaction); delegate.add_method("eth_checkRequest", EthSigning::check_request); + delegate.add_method("ethcore_decryptMessage", EthSigning::decrypt_message); delegate } } diff --git a/rpc/src/v1/traits/ethcore.rs b/rpc/src/v1/traits/ethcore.rs index efd838297..56c27534a 100644 --- a/rpc/src/v1/traits/ethcore.rs +++ b/rpc/src/v1/traits/ethcore.rs @@ -76,6 +76,13 @@ pub trait Ethcore: Sized + Send + Sync + 'static { /// Returns the value of the registrar for this network. fn registry_address(&self, _: Params) -> Result; + /// Encrypt some data with a public key under ECIES. + /// First parameter is the 512-byte destination public key, second is the message. + fn encrypt_message(&self, _: Params) -> Result; + + /// Returns all pending (current) transactions from transaction queue. + fn pending_transactions(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. 
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); @@ -98,7 +105,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static { delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase); delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address); delegate.add_method("ethcore_registryAddress", Ethcore::registry_address); - + delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message); + delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions); delegate } } diff --git a/rpc/src/v1/traits/personal.rs b/rpc/src/v1/traits/personal.rs index 89d63c863..988091958 100644 --- a/rpc/src/v1/traits/personal.rs +++ b/rpc/src/v1/traits/personal.rs @@ -92,12 +92,16 @@ pub trait PersonalSigner: Sized + Send + Sync + 'static { /// Reject the confirmation request. fn reject_request(&self, _: Params) -> Result; + /// Generates new authorization token. + fn generate_token(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. 
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); delegate.add_method("personal_requestsToConfirm", PersonalSigner::requests_to_confirm); delegate.add_method("personal_confirmRequest", PersonalSigner::confirm_request); delegate.add_method("personal_rejectRequest", PersonalSigner::reject_request); + delegate.add_method("personal_generateAuthorizationToken", PersonalSigner::generate_token); delegate } } diff --git a/rpc/src/v1/types/block.rs b/rpc/src/v1/types/block.rs index 21459d026..70f39ba73 100644 --- a/rpc/src/v1/types/block.rs +++ b/rpc/src/v1/types/block.rs @@ -103,7 +103,7 @@ mod tests { fn test_serialize_block_transactions() { let t = BlockTransactions::Full(vec![Transaction::default()]); let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x"}]"#); + assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null}]"#); let t = BlockTransactions::Hashes(vec![H256::default().into()]); let serialized = serde_json::to_string(&t).unwrap(); diff --git a/rpc/src/v1/types/block_number.rs b/rpc/src/v1/types/block_number.rs index 302d099d5..01625f8ed 100644 --- a/rpc/src/v1/types/block_number.rs +++ b/rpc/src/v1/types/block_number.rs @@ -31,6 +31,12 @@ pub enum BlockNumber { Pending, } +impl Default for BlockNumber { + fn default() -> Self { + BlockNumber::Latest + } +} + impl Deserialize for BlockNumber { fn deserialize(deserializer: &mut D) 
-> Result where D: Deserializer { diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index 09c899057..57ff9f22e 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -70,10 +70,16 @@ impl Visitor for BytesVisitor { type Value = Bytes; fn visit_str(&mut self, value: &str) -> Result where E: Error { - if value.len() >= 2 && &value[0..2] == "0x" { - Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![]))) + if value.is_empty() { + warn!( + target: "deprecated", + "Deserializing empty string as empty bytes. This is a non-standard behaviour that will be removed in future versions. Please update your code to send `0x` instead!" + ); + Ok(Bytes::new(Vec::new())) + } else if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 { + Ok(Bytes::new(try!(FromHex::from_hex(&value[2..]).map_err(|_| Error::custom("invalid hex"))))) } else { - Err(Error::custom("invalid hex")) + Err(Error::custom("invalid format")) } } @@ -95,5 +101,31 @@ mod tests { let serialized = serde_json::to_string(&bytes).unwrap(); assert_eq!(serialized, r#""0x0123456789abcdef""#); } + + #[test] + fn test_bytes_deserialize() { + // TODO [ToDr] Uncomment when Mist starts sending correct data + // let bytes1: Result = serde_json::from_str(r#""""#); + let bytes2: Result = serde_json::from_str(r#""0x123""#); + let bytes3: Result = serde_json::from_str(r#""0xgg""#); + + let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap(); + let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap(); + let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap(); + + // assert!(bytes1.is_err()); + assert!(bytes2.is_err()); + assert!(bytes3.is_err()); + assert_eq!(bytes4, Bytes(vec![])); + assert_eq!(bytes5, Bytes(vec![0x12])); + assert_eq!(bytes6, Bytes(vec![0x1, 0x23])); + } + + // TODO [ToDr] Remove when Mist starts sending correct data + #[test] + fn test_bytes_lenient_against_the_spec_deserialize_for_empty_string_for_mist_compatibility() { + let 
deserialized: Bytes = serde_json::from_str(r#""""#).unwrap(); + assert_eq!(deserialized, Bytes(Vec::new())); + } } diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index e07845211..b4a45272b 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use serde::{Deserialize, Deserializer, Error}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, Error}; use serde_json::value; use jsonrpc_core::Value; use ethcore::filter::Filter as EthFilter; use ethcore::client::BlockID; -use v1::types::{BlockNumber, H160, H256}; +use v1::types::{BlockNumber, H160, H256, Log}; /// Variadic value #[derive(Debug, PartialEq, Clone)] @@ -66,6 +66,8 @@ pub struct Filter { pub address: Option, /// Topics pub topics: Option>, + /// Limit + pub limit: Option, } impl Into for Filter { @@ -85,7 +87,29 @@ impl Into for Filter { VariadicValue::Multiple(t) => Some(t.into_iter().map(Into::into).collect()) }).filter_map(|m| m).collect()).into_iter(); vec![iter.next(), iter.next(), iter.next(), iter.next()] - } + }, + limit: self.limit, + } + } +} + +/// Results of the filter_changes RPC. +#[derive(Debug, PartialEq)] +pub enum FilterChanges { + /// New logs. 
+ Logs(Vec), + /// New hashes (block or transactions) + Hashes(Vec), + /// Empty result, + Empty, +} + +impl Serialize for FilterChanges { + fn serialize(&self, s: &mut S) -> Result<(), S::Error> where S: Serializer { + match *self { + FilterChanges::Logs(ref logs) => logs.serialize(s), + FilterChanges::Hashes(ref hashes) => hashes.serialize(s), + FilterChanges::Empty => (&[] as &[Value]).serialize(s), } } } @@ -120,7 +144,8 @@ mod tests { from_block: Some(BlockNumber::Earliest), to_block: Some(BlockNumber::Latest), address: None, - topics: None + topics: None, + limit: None, }); } } diff --git a/rpc/src/v1/types/hash.rs b/rpc/src/v1/types/hash.rs index 47c529235..3080aa031 100644 --- a/rpc/src/v1/types/hash.rs +++ b/rpc/src/v1/types/hash.rs @@ -20,7 +20,7 @@ use std::cmp::Ordering; use std::hash::{Hash, Hasher}; use serde; use rustc_serialize::hex::{ToHex, FromHex}; -use util::{H64 as Eth64, H256 as EthH256, H520 as EthH520, H2048 as Eth2048, H160 as Eth160}; +use util::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as Eth512, H2048 as Eth2048}; macro_rules! impl_hash { ($name: ident, $other: ident, $size: expr) => { @@ -144,6 +144,7 @@ macro_rules! 
impl_hash { impl_hash!(H64, Eth64, 8); impl_hash!(H160, Eth160, 20); -impl_hash!(H256, EthH256, 32); -impl_hash!(H520, EthH520, 65); +impl_hash!(H256, Eth256, 32); +impl_hash!(H512, Eth512, 64); +impl_hash!(H520, Eth520, 65); impl_hash!(H2048, Eth2048, 256); diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 312e93818..1369037ed 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -30,14 +30,15 @@ mod receipt; mod trace; mod trace_filter; mod uint; +mod work; pub use self::bytes::Bytes; pub use self::block::{Block, BlockTransactions}; pub use self::block_number::BlockNumber; pub use self::call_request::CallRequest; pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, TransactionModification}; -pub use self::filter::Filter; -pub use self::hash::{H64, H160, H256, H520, H2048}; +pub use self::filter::{Filter, FilterChanges}; +pub use self::hash::{H64, H160, H256, H512, H520, H2048}; pub use self::index::Index; pub use self::log::Log; pub use self::sync::{SyncStatus, SyncInfo, Peers}; @@ -47,3 +48,4 @@ pub use self::receipt::Receipt; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; pub use self::uint::U256; +pub use self::work::Work; diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index d4697aff2..6aa9ee899 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -16,7 +16,7 @@ use ethcore::contract_address; use ethcore::transaction::{LocalizedTransaction, Action, SignedTransaction}; -use v1::types::{Bytes, H160, H256, U256}; +use v1::types::{Bytes, H160, H256, U256, H512}; /// Transaction #[derive(Debug, Default, Serialize)] @@ -51,6 +51,9 @@ pub struct Transaction { pub creates: Option, /// Raw transaction data pub raw: Bytes, + /// Public key of the signer. 
+ #[serde(rename="publicKey")] + pub public_key: Option, } impl From for Transaction { @@ -75,6 +78,7 @@ impl From for Transaction { Action::Call(_) => None, }, raw: ::rlp::encode(&t.signed).to_vec().into(), + public_key: t.public_key().ok().map(Into::into), } } } @@ -101,6 +105,7 @@ impl From for Transaction { Action::Call(_) => None, }, raw: ::rlp::encode(&t).to_vec().into(), + public_key: t.public_key().ok().map(Into::into), } } } @@ -114,7 +119,7 @@ mod tests { fn test_transaction_serialize() { let t = Transaction::default(); let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x"}"#); + assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null}"#); } } diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index bcd874a18..9be7b1170 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -77,6 +77,10 @@ macro_rules! 
impl_uint { return Err(serde::Error::custom("Invalid length.")); } + if &value[0..2] != "0x" { + return Err(serde::Error::custom("Use hex encoded numbers with 0x prefix.")) + } + $other::from_str(&value[2..]).map($name).map_err(|_| serde::Error::custom("Invalid hex value.")) } @@ -100,6 +104,8 @@ mod tests { use super::U256; use serde_json; + type Res = Result; + #[test] fn should_serialize_u256() { let serialized1 = serde_json::to_string(&U256(0.into())).unwrap(); @@ -113,6 +119,21 @@ mod tests { assert_eq!(serialized4, r#""0x100""#); } + #[test] + fn should_fail_to_deserialize_decimals() { + let deserialized1: Res = serde_json::from_str(r#""""#); + let deserialized2: Res = serde_json::from_str(r#""0""#); + let deserialized3: Res = serde_json::from_str(r#""10""#); + let deserialized4: Res = serde_json::from_str(r#""1000000""#); + let deserialized5: Res = serde_json::from_str(r#""1000000000000000000""#); + + assert!(deserialized1.is_err()); + assert!(deserialized2.is_err()); + assert!(deserialized3.is_err()); + assert!(deserialized4.is_err()); + assert!(deserialized5.is_err()); + } + #[test] fn should_deserialize_u256() { let deserialized1: U256 = serde_json::from_str(r#""0x""#).unwrap(); diff --git a/rpc/src/v1/types/work.rs b/rpc/src/v1/types/work.rs new file mode 100644 index 000000000..0817eb24a --- /dev/null +++ b/rpc/src/v1/types/work.rs @@ -0,0 +1,43 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use super::{H256, U256}; + +use serde::{Serialize, Serializer}; + +/// The result of an `eth_getWork` call: it differs based on an option +/// whether to send the block number. +#[derive(Debug, PartialEq, Eq)] +pub struct Work { + /// The proof-of-work hash. + pub pow_hash: H256, + /// The seed hash. + pub seed_hash: H256, + /// The target. + pub target: H256, + /// The block number: this isn't always stored. + pub number: Option, +} + +impl Serialize for Work { + fn serialize(&self, s: &mut S) -> Result<(), S::Error> where S: Serializer { + match self.number.as_ref() { + Some(num) => (&self.pow_hash, &self.seed_hash, &self.target, U256::from(*num)).serialize(s), + None => (&self.pow_hash, &self.seed_hash, &self.target).serialize(s), + } + } +} + diff --git a/scripts/deploy.sh b/scripts/deploy.sh new file mode 100644 index 000000000..3fcd05f7f --- /dev/null +++ b/scripts/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/bash +ll +ls +la +echo "list of binaries" +exit diff --git a/scripts/targets.sh b/scripts/targets.sh index 009b8ad1d..529937c23 100644 --- a/scripts/targets.sh +++ b/scripts/targets.sh @@ -1,10 +1,10 @@ #!/bin/bash export TARGETS=" - -p bigint\ -p rlp\ -p ethash \ -p ethcore \ + -p ethcore-bigint\ -p ethcore-dapps \ -p ethcore-rpc \ -p ethcore-signer \ diff --git a/signer/Cargo.toml b/signer/Cargo.toml index 8b7268dde..3d0e76896 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -22,7 +22,7 @@ ethcore-rpc = { path = "../rpc" } ethcore-devtools = { path = "../devtools" } parity-dapps-signer = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true} -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} [features] dev = ["clippy"] diff --git a/signer/src/authcode_store.rs b/signer/src/authcode_store.rs index 7b9ff1d6b..d8068fc88 100644 --- a/signer/src/authcode_store.rs +++ 
b/signer/src/authcode_store.rs @@ -48,6 +48,7 @@ impl TimeProvider for DefaultTimeProvider { /// No of seconds the hash is valid const TIME_THRESHOLD: u64 = 7; const TOKEN_LENGTH: usize = 16; +const INITIAL_TOKEN: &'static str = "initial"; /// Manages authorization codes for `SignerUIs` pub struct AuthCodes { @@ -98,7 +99,7 @@ impl AuthCodes { } /// Checks if given hash is correct identifier of `SignerUI` - pub fn is_valid(&self, hash: &H256, time: u64) -> bool { + pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool { let now = self.now.now(); // check time if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD { @@ -106,9 +107,21 @@ impl AuthCodes { return false; } + let as_token = |code| format!("{}:{}", code, time).sha3(); + + // Check if it's the initial token. + if self.is_empty() { + let initial = &as_token(INITIAL_TOKEN) == hash; + // Initial token can be used only once. + if initial { + let _ = self.generate_new(); + } + return initial; + } + // look for code self.codes.iter() - .any(|code| &format!("{}:{}", code, time).sha3() == hash) + .any(|code| &as_token(code) == hash) } /// Generates and returns a new code that can be used by `SignerUIs` @@ -124,6 +137,11 @@ impl AuthCodes { self.codes.push(code); Ok(readable_code) } + + /// Returns true if there are no tokens in this store + pub fn is_empty(&self) -> bool { + self.codes.is_empty() + } } @@ -137,12 +155,28 @@ mod tests { format!("{}:{}", val, time).sha3() } + #[test] + fn should_return_true_if_code_is_initial_and_store_is_empty() { + // given + let code = "initial"; + let time = 99; + let mut codes = AuthCodes::new(vec![], || 100); + + // when + let res1 = codes.is_valid(&generate_hash(code, time), time); + let res2 = codes.is_valid(&generate_hash(code, time), time); + + // then + assert_eq!(res1, true); + assert_eq!(res2, false); + } + #[test] fn should_return_true_if_hash_is_valid() { // given let code = "23521352asdfasdfadf"; let time = 99; - let codes = 
AuthCodes::new(vec![code.into()], || 100); + let mut codes = AuthCodes::new(vec![code.into()], || 100); // when let res = codes.is_valid(&generate_hash(code, time), time); @@ -156,7 +190,7 @@ mod tests { // given let code = "23521352asdfasdfadf"; let time = 99; - let codes = AuthCodes::new(vec!["1".into()], || 100); + let mut codes = AuthCodes::new(vec!["1".into()], || 100); // when let res = codes.is_valid(&generate_hash(code, time), time); @@ -171,7 +205,7 @@ mod tests { let code = "23521352asdfasdfadf"; let time = 107; let time2 = 93; - let codes = AuthCodes::new(vec![code.into()], || 100); + let mut codes = AuthCodes::new(vec![code.into()], || 100); // when let res1 = codes.is_valid(&generate_hash(code, time), time); diff --git a/signer/src/tests/mod.rs b/signer/src/tests/mod.rs index eaed49de8..61b2ff1d3 100644 --- a/signer/src/tests/mod.rs +++ b/signer/src/tests/mod.rs @@ -14,24 +14,48 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::env; +use std::ops::{Deref, DerefMut}; use std::thread; -use std::time::Duration; +use std::time::{self, Duration}; use std::sync::Arc; -use devtools::http_client; +use devtools::{http_client, RandomTempPath}; use rpc::ConfirmationsQueue; +use util::Hashable; use rand; use ServerBuilder; use Server; +use AuthCodes; -pub fn serve() -> Server { +pub struct GuardedAuthCodes { + authcodes: AuthCodes, + path: RandomTempPath, +} +impl Deref for GuardedAuthCodes { + type Target = AuthCodes; + fn deref(&self) -> &Self::Target { + &self.authcodes + } +} +impl DerefMut for GuardedAuthCodes { + fn deref_mut(&mut self) -> &mut AuthCodes { + &mut self.authcodes + } +} + +pub fn serve() -> (Server, usize, GuardedAuthCodes) { + let mut path = RandomTempPath::new(); + path.panic_on_drop_failure = false; let queue = Arc::new(ConfirmationsQueue::default()); - let builder = ServerBuilder::new(queue, env::temp_dir()); + let builder = ServerBuilder::new(queue, path.to_path_buf()); let port = 35000 + rand::random::() % 10000; let res = builder.start(format!("127.0.0.1:{}", port).parse().unwrap()).unwrap(); thread::sleep(Duration::from_millis(25)); - res + + (res, port, GuardedAuthCodes { + authcodes: AuthCodes::from_file(&path).unwrap(), + path: path, + }) } pub fn request(server: Server, request: &str) -> http_client::Response { @@ -41,7 +65,7 @@ pub fn request(server: Server, request: &str) -> http_client::Response { #[test] fn should_reject_invalid_host() { // given - let server = serve(); + let server = serve().0; // when let response = request(server, @@ -62,7 +86,7 @@ fn should_reject_invalid_host() { #[test] fn should_serve_styles_even_on_disallowed_domain() { // given - let server = serve(); + let server = serve().0; // when let response = request(server, @@ -79,3 +103,103 @@ fn should_serve_styles_even_on_disallowed_domain() { assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); } +#[test] +fn should_block_if_authorization_is_incorrect() { + // given + let 
(server, port, _) = serve(); + + // when + let response = request(server, + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Upgrade\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol: wrong\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", port) + ); + + // then + assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); +} + +#[test] +fn should_allow_if_authorization_is_correct() { + // given + let (server, port, mut authcodes) = serve(); + let code = authcodes.generate_new().unwrap().replace("-", ""); + authcodes.to_file(&authcodes.path).unwrap(); + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + + // when + let response = request(server, + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol: {:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + + // then + assert_eq!(response.status, "HTTP/1.1 101 Switching Protocols".to_owned()); +} + +#[test] +fn should_allow_initial_connection_but_only_once() { + // given + let (server, port, authcodes) = serve(); + let code = "initial"; + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + assert!(authcodes.is_empty()); + + // when + let response1 = http_client::request(server.addr(), + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol:{:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + let response2 = http_client::request(server.addr(), + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol:{:?}_{}\r\n\ + Sec-WebSocket-Version: 
13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + + + // then + assert_eq!(response1.status, "HTTP/1.1 101 Switching Protocols".to_owned()); + assert_eq!(response2.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); +} diff --git a/signer/src/ws_server/mod.rs b/signer/src/ws_server/mod.rs index 6d332adbe..697fbd4c7 100644 --- a/signer/src/ws_server/mod.rs +++ b/signer/src/ws_server/mod.rs @@ -127,7 +127,7 @@ impl Server { // Spawn a thread with event loop let handle = thread::spawn(move || { ph.catch_panic(move || { - match ws.listen(addr.clone()).map_err(ServerError::from) { + match ws.listen(addr).map_err(ServerError::from) { Err(ServerError::IoError(io)) => die(format!( "Signer: Could not start listening on specified address. Make sure that no other instance is running on Signer's port. Details: {:?}", io @@ -180,7 +180,6 @@ impl Drop for Server { self.queue.finish(); self.broadcaster_handle.take().unwrap().join().unwrap(); self.handle.take().unwrap().join().unwrap(); - } } diff --git a/signer/src/ws_server/session.rs b/signer/src/ws_server/session.rs index cd3e2eee3..afc6606d7 100644 --- a/signer/src/ws_server/session.rs +++ b/signer/src/ws_server/session.rs @@ -59,7 +59,7 @@ fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool { } } -fn auth_is_valid(codes: &Path, protocols: ws::Result>) -> bool { +fn auth_is_valid(codes_path: &Path, protocols: ws::Result>) -> bool { match protocols { Ok(ref protocols) if protocols.len() == 1 => { protocols.iter().any(|protocol| { @@ -69,8 +69,15 @@ fn auth_is_valid(codes: &Path, protocols: ws::Result>) -> bool { if let (Some(auth), Some(time)) = (auth, time) { // Check if the code is valid - AuthCodes::from_file(codes) - .map(|codes| codes.is_valid(&auth, time)) + AuthCodes::from_file(codes_path) + .map(|mut codes| { + let res = codes.is_valid(&auth, time); + // make sure to save back authcodes - it might have been modified + if let Err(_) = 
codes.to_file(codes_path) { + warn!(target: "signer", "Couldn't save authorization codes to file."); + } + res + }) .unwrap_or(false) } else { false diff --git a/sync/Cargo.toml b/sync/Cargo.toml index a73077c9c..99c522075 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -17,7 +17,7 @@ ethcore-network = { path = "../util/network" } ethcore-io = { path = "../util/io" } ethcore = { path = "../ethcore" } rlp = { path = "../util/rlp" } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} log = "0.3" env_logger = "0.3" time = "0.1.34" diff --git a/sync/src/blocks.rs b/sync/src/blocks.rs index 753ba7111..ad842ced6 100644 --- a/sync/src/blocks.rs +++ b/sync/src/blocks.rs @@ -233,7 +233,7 @@ impl BlockCollection { fn insert_body(&mut self, b: Bytes) -> Result<(), NetworkError> { let body = UntrustedRlp::new(&b); let tx = try!(body.at(0)); - let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here + let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here let uncles = try!(body.at(1)).as_raw().sha3(); let header_id = HeaderId { transactions_root: tx_root, diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ea5e593f3..565c53827 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -108,7 +108,6 @@ known_heap_size!(0, PeerInfo); type PacketDecodeError = DecoderError; -const PROTOCOL_VERSION: u8 = 64u8; const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; @@ -426,7 +425,7 @@ impl ChainSync { self.request_snapshot_manifest(io, peer_id); self.state = SyncState::SnapshotManifest; } - + /// Restart sync after bad block has been detected. 
May end up re-downloading up to QUEUE_SIZE blocks fn restart_on_bad_block(&mut self, io: &mut SyncIo) { // Do not assume that the block queue/chain still has our last_imported_block @@ -807,7 +806,7 @@ impl ChainSync { } let manifest_rlp = try!(r.at(0)); - let manifest = match ManifestData::from_rlp(&manifest_rlp.as_raw()) { + let manifest = match ManifestData::from_rlp(manifest_rlp.as_raw()) { Err(e) => { trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e); io.disconnect_peer(peer_id); @@ -995,8 +994,8 @@ impl ChainSync { self.request_snapshot_data(io, peer_id); } }, - SyncState::SnapshotManifest => (), //already downloading from other peer - SyncState::Waiting | SyncState::SnapshotWaiting => () + SyncState::SnapshotManifest | //already downloading from other peer + SyncState::Waiting | SyncState::SnapshotWaiting => () } } } @@ -1274,7 +1273,7 @@ impl ChainSync { let pv64 = io.eth_protocol_version(peer) >= 64; let mut packet = RlpStream::new_list(if pv64 { 7 } else { 5 }); let chain = io.chain().chain_info(); - packet.append(&(PROTOCOL_VERSION as u32)); + packet.append(&(io.eth_protocol_version(peer) as u32)); packet.append(&self.network_id); packet.append(&chain.total_difficulty); packet.append(&chain.best_block_hash); @@ -1439,8 +1438,7 @@ impl ChainSync { }, None => { trace!(target: "sync", "{}: No manifest to return", peer_id); - let rlp = RlpStream::new_list(0); - rlp + RlpStream::new_list(0) } }; Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp))) @@ -1457,8 +1455,7 @@ impl ChainSync { rlp }, None => { - let rlp = RlpStream::new_list(0); - rlp + RlpStream::new_list(0) } }; Ok(Some((SNAPSHOT_DATA_PACKET, rlp))) @@ -1543,6 +1540,7 @@ impl ChainSync { }) } + #[cfg_attr(feature="dev", allow(match_same_arms))] pub fn maintain_peers(&mut self, io: &mut SyncIo) { let tick = time::precise_time_s(); let mut aborting = Vec::new(); @@ -1712,7 +1710,7 @@ impl ChainSync { return 0; } - let all_transactions_hashes = transactions.iter().map(|ref tx| 
tx.hash()).collect::>(); + let all_transactions_hashes = transactions.iter().map(|tx| tx.hash()).collect::>(); let all_transactions_rlp = { let mut packet = RlpStream::new_list(transactions.len()); for tx in &transactions { packet.append(tx); } @@ -1862,8 +1860,8 @@ mod tests { fn return_receipts_empty() { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &mut queue, None); let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0); @@ -1875,8 +1873,8 @@ mod tests { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); let sync = dummy_sync_with_peer(H256::new(), &client); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let mut receipt_list = RlpStream::new_list(4); receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -1931,8 +1929,8 @@ mod tests { let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &mut queue, None); let unknown: H256 = H256::new(); let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); @@ -1970,8 +1968,8 @@ mod tests { let mut client = TestBlockChainClient::new(); let mut queue = VecDeque::new(); let sync = dummy_sync_with_peer(H256::new(), &client); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = 
TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let mut node_list = RlpStream::new_list(3); node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -2026,8 +2024,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); let chain_info = client.chain_info(); - let mut ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &mut queue, None); let lagging_peers = sync.get_lagging_peers(&chain_info, &io); @@ -2058,8 +2056,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); @@ -2079,8 +2077,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); @@ -2100,8 +2098,8 @@ mod tests { let hash = client.block_hash(BlockID::Number(99)).unwrap(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut ss = TestSnapshotService::new(); 
- let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); @@ -2120,8 +2118,8 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); // Try to propagate same transactions for the second time let peer_count2 = sync.propagate_new_transactions(&mut io); @@ -2142,8 +2140,8 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]); // Try to propagate same transactions for the second time @@ -2166,17 +2164,17 @@ mod tests { client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); + let ss = TestSnapshotService::new(); // should sent some { - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peer_count = sync.propagate_new_transactions(&mut io); assert_eq!(1, io.queue.len()); assert_eq!(1, 
peer_count); } // Insert some more client.insert_transaction_to_queue(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); // Propagate new transactions let peer_count2 = sync.propagate_new_transactions(&mut io); // And now the peer should have all transactions @@ -2202,8 +2200,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); //sync.have_common_block = true; - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let block = UntrustedRlp::new(&block_data); @@ -2221,8 +2219,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let block = UntrustedRlp::new(&block_data); @@ -2237,8 +2235,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let empty_data = vec![]; let block = UntrustedRlp::new(&empty_data); @@ -2254,8 +2252,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = 
TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let hashes_data = get_dummy_hashes(); let hashes_rlp = UntrustedRlp::new(&hashes_data); @@ -2271,8 +2269,8 @@ mod tests { client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let empty_hashes_data = vec![]; let hashes_rlp = UntrustedRlp::new(&empty_hashes_data); @@ -2291,8 +2289,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); sync.propagate_new_hashes(&chain_info, &mut io, &peers); @@ -2311,8 +2309,8 @@ mod tests { let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); let peers = sync.get_lagging_peers(&chain_info, &io); sync.propagate_blocks(&chain_info, &mut io, &[], &peers); @@ -2346,8 +2344,8 @@ mod tests { // when { let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); 
io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks); sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0); @@ -2361,8 +2359,8 @@ mod tests { } { let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks); sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]); } @@ -2386,8 +2384,8 @@ mod tests { let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let mut queue = VecDeque::new(); - let mut ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &mut queue, None); // when sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); diff --git a/sync/src/tests/snapshot.rs b/sync/src/tests/snapshot.rs index b27602b0d..adbb3ce48 100644 --- a/sync/src/tests/snapshot.rs +++ b/sync/src/tests/snapshot.rs @@ -74,14 +74,14 @@ impl SnapshotService for TestSnapshotService { } fn status(&self) -> RestorationStatus { - match &*self.restoration_manifest.lock() { - &Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() && + match *self.restoration_manifest.lock() { + Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() && self.block_restoration_chunks.lock().len() == manifest.block_hashes.len() => RestorationStatus::Inactive, - &Some(_) => RestorationStatus::Ongoing { + Some(_) => RestorationStatus::Ongoing { state_chunks_done: self.state_restoration_chunks.lock().len() as u32, block_chunks_done: self.block_restoration_chunks.lock().len() as u32, }, - &None => 
RestorationStatus::Inactive, + None => RestorationStatus::Inactive, } } @@ -98,13 +98,13 @@ impl SnapshotService for TestSnapshotService { } fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { - if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.state_hashes.iter().any(|h| h == &hash)) { + if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.state_hashes.iter().any(|h| h == &hash)) { self.state_restoration_chunks.lock().insert(hash, chunk); } } fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { - if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.block_hashes.iter().any(|h| h == &hash)) { + if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.block_hashes.iter().any(|h| h == &hash)) { self.block_restoration_chunks.lock().insert(hash, chunk); } } diff --git a/util/Cargo.toml b/util/Cargo.toml index 106251a3f..81916555c 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -23,12 +23,12 @@ rlp = { path = "rlp" } heapsize = { version = "0.3", features = ["unstable"] } itertools = "0.4" sha3 = { path = "sha3" } -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} ethcore-devtools = { path = "../devtools" } libc = "0.2.7" vergen = "0.1" target_info = "0.1" -bigint = { path = "bigint" } +ethcore-bigint = { path = "bigint" } parking_lot = "0.2.6" using_queue = { path = "using_queue" } table = { path = "table" } diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 68778e18c..ee25ce846 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -1,8 +1,9 @@ [package] -description = "Rust-assembler implementation of big integers arithmetic" +description = "Large fixed-size integers and hash function outputs" homepage = "http://ethcore.io" +repository = "https://github.com/ethcore/parity" license = "GPL-3.0" -name = "bigint" +name = "ethcore-bigint" version = "0.1.0" authors = ["Ethcore "] build = "build.rs" diff --git 
a/util/bigint/src/lib.rs b/util/bigint/src/lib.rs index 746cc8139..307aed3ce 100644 --- a/util/bigint/src/lib.rs +++ b/util/bigint/src/lib.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Efficient large, fixed-size big integers and hashes. + #![cfg_attr(asm_available, feature(asm))] extern crate rand; diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index c5dd033cb..dab00537e 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -30,11 +30,12 @@ // If not, see . // -//! Big unsigned integer types +//! Big unsigned integer types. //! //! Implementation of a various large-but-fixed sized unsigned integer types. -//! The functions here are designed to be fast. -//! +//! The functions here are designed to be fast. There are optional `x86_64` +//! implementations for even more speed, hidden behind the `x64_arithmetic` +//! feature flag. use std::{mem, fmt}; use std::str::{FromStr}; diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 50ef6a5e4..0a3dfeff9 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -14,7 +14,7 @@ time = "0.1.34" tiny-keccak = "1.0" rust-crypto = "0.2.34" slab = "0.2" -clippy = { version = "0.0.85", optional = true} +clippy = { version = "0.0.90", optional = true} igd = "0.5.0" libc = "0.2.7" parking_lot = "0.2.6" diff --git a/util/rlp/Cargo.toml b/util/rlp/Cargo.toml index eba3a2842..c24e4cc59 100644 --- a/util/rlp/Cargo.toml +++ b/util/rlp/Cargo.toml @@ -7,6 +7,6 @@ authors = ["Ethcore "] [dependencies] elastic-array = "0.5" -bigint = { path = "../bigint" } +ethcore-bigint = { path = "../bigint" } lazy_static = "0.2" rustc-serialize = "0.3" \ No newline at end of file diff --git a/util/rlp/src/bytes.rs b/util/rlp/src/bytes.rs index 5940d21d2..07ac108d6 100644 --- a/util/rlp/src/bytes.rs +++ b/util/rlp/src/bytes.rs @@ -174,6 +174,8 @@ pub enum FromBytesError { DataIsTooLong, /// Integer-representation 
is non-canonically prefixed with zero byte(s). ZeroPrefixedInt, + /// String representation is not utf-8 + InvalidUtf8, } impl StdError for FromBytesError { @@ -199,7 +201,7 @@ pub trait FromBytes: Sized { impl FromBytes for String { fn from_bytes(bytes: &[u8]) -> FromBytesResult { - Ok(::std::str::from_utf8(bytes).unwrap().to_owned()) + ::std::str::from_utf8(bytes).map(|s| s.to_owned()).map_err(|_| FromBytesError::InvalidUtf8) } } diff --git a/util/rlp/src/lib.rs b/util/rlp/src/lib.rs index 2cc8c3bd8..da5e816b5 100644 --- a/util/rlp/src/lib.rs +++ b/util/rlp/src/lib.rs @@ -65,7 +65,7 @@ pub use self::rlpin::{Rlp, RlpIterator}; pub use self::rlpstream::RlpStream; pub use self::rlpcompression::RlpType; -extern crate bigint; +extern crate ethcore_bigint as bigint; extern crate elastic_array; extern crate rustc_serialize; diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 7c5e929f4..80b44c0e7 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -20,6 +20,7 @@ //! as use std::fmt; +use std::cmp::min; use std::ops::{Deref, DerefMut}; /// Slice pretty print helper @@ -71,6 +72,32 @@ pub enum BytesRef<'a> { Fixed(&'a mut [u8]) } +impl<'a> BytesRef<'a> { + /// Writes given `input` to this `BytesRef` starting at `offset`. + /// Returns number of bytes written to the ref. + /// NOTE can return number greater than `input.len()` in case flexible vector had to be extended. 
+ pub fn write(&mut self, offset: usize, input: &[u8]) -> usize { + match *self { + BytesRef::Flexible(ref mut data) => { + let data_len = data.len(); + let wrote = input.len() + if data_len > offset { 0 } else { offset - data_len }; + + data.resize(offset, 0); + data.extend_from_slice(input); + wrote + }, + BytesRef::Fixed(ref mut data) if offset < data.len() => { + let max = min(data.len() - offset, input.len()); + for i in 0..max { + data[offset + i] = input[i]; + } + max + }, + _ => 0 + } + } +} + impl<'a> Deref for BytesRef<'a> { type Target = [u8]; @@ -92,4 +119,61 @@ impl <'a> DerefMut for BytesRef<'a> { } /// Vector of bytes. -pub type Bytes = Vec; \ No newline at end of file +pub type Bytes = Vec; + +#[cfg(test)] +mod tests { + use super::BytesRef; + + #[test] + fn should_write_bytes_to_fixed_bytesref() { + // given + let mut data1 = vec![0, 0, 0]; + let mut data2 = vec![0, 0, 0]; + let (res1, res2) = { + let mut bytes1 = BytesRef::Fixed(&mut data1[..]); + let mut bytes2 = BytesRef::Fixed(&mut data2[1..2]); + + // when + let res1 = bytes1.write(1, &[1, 1, 1]); + let res2 = bytes2.write(3, &[1, 1, 1]); + (res1, res2) + }; + + // then + assert_eq!(&data1, &[0, 1, 1]); + assert_eq!(res1, 2); + + assert_eq!(&data2, &[0, 0, 0]); + assert_eq!(res2, 0); + } + + #[test] + fn should_write_bytes_to_flexible_bytesref() { + // given + let mut data1 = vec![0, 0, 0]; + let mut data2 = vec![0, 0, 0]; + let mut data3 = vec![0, 0, 0]; + let (res1, res2, res3) = { + let mut bytes1 = BytesRef::Flexible(&mut data1); + let mut bytes2 = BytesRef::Flexible(&mut data2); + let mut bytes3 = BytesRef::Flexible(&mut data3); + + // when + let res1 = bytes1.write(1, &[1, 1, 1]); + let res2 = bytes2.write(3, &[1, 1, 1]); + let res3 = bytes3.write(5, &[1, 1, 1]); + (res1, res2, res3) + }; + + // then + assert_eq!(&data1, &[0, 1, 1, 1]); + assert_eq!(res1, 3); + + assert_eq!(&data2, &[0, 0, 0, 1, 1, 1]); + assert_eq!(res2, 3); + + assert_eq!(&data3, &[0, 0, 0, 0, 0, 1, 1, 1]); + 
assert_eq!(res3, 5); + } +} diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 177df5fa0..708b8d870 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -330,8 +330,8 @@ impl Database { /// Commit buffered changes to database. pub fn flush(&self) -> Result<(), String> { - match &*self.db.read() { - &Some(DBAndColumns { ref db, ref cfs }) => { + match *self.db.read() { + Some(DBAndColumns { ref db, ref cfs }) => { let batch = WriteBatch::new(); let mut overlay = self.overlay.write(); @@ -366,15 +366,15 @@ impl Database { } db.write_opt(batch, &self.write_opts) }, - &None => Err("Database is closed".to_owned()) + None => Err("Database is closed".to_owned()) } } /// Commit transaction to database. pub fn write(&self, tr: DBTransaction) -> Result<(), String> { - match &*self.db.read() { - &Some(DBAndColumns { ref db, ref cfs }) => { + match *self.db.read() { + Some(DBAndColumns { ref db, ref cfs }) => { let batch = WriteBatch::new(); let ops = tr.ops; for op in ops { @@ -393,14 +393,14 @@ impl Database { } db.write_opt(batch, &self.write_opts) }, - &None => Err("Database is closed".to_owned()) + None => Err("Database is closed".to_owned()) } } /// Get value by key. pub fn get(&self, col: Option, key: &[u8]) -> Result, String> { - match &*self.db.read() { - &Some(DBAndColumns { ref db, ref cfs }) => { + match *self.db.read() { + Some(DBAndColumns { ref db, ref cfs }) => { let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; match overlay.get(key) { Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())), @@ -412,15 +412,15 @@ impl Database { }, } }, - &None => Ok(None), + None => Ok(None), } } /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. 
// TODO: support prefix seek for unflushed data pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - match &*self.db.read() { - &Some(DBAndColumns { ref db, ref cfs }) => { + match *self.db.read() { + Some(DBAndColumns { ref db, ref cfs }) => { let mut iter = col.map_or_else(|| db.iterator(IteratorMode::From(prefix, Direction::Forward)), |c| db.iterator_cf(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap()); match iter.next() { @@ -429,19 +429,19 @@ impl Database { _ => None } }, - &None => None, + None => None, } } /// Get database iterator for flushed data. pub fn iter(&self, col: Option) -> DatabaseIterator { //TODO: iterate over overlay - match &*self.db.read() { - &Some(DBAndColumns { ref db, ref cfs }) => { + match *self.db.read() { + Some(DBAndColumns { ref db, ref cfs }) => { col.map_or_else(|| DatabaseIterator { iter: db.iterator(IteratorMode::Start) }, |c| DatabaseIterator { iter: db.iterator_cf(cfs[c as usize], IteratorMode::Start).unwrap() }) }, - &None => panic!("Not supported yet") //TODO: return an empty iterator or change return type + None => panic!("Not supported yet") //TODO: return an empty iterator or change return type } } @@ -458,8 +458,6 @@ impl Database { let mut backup_db = PathBuf::from(&self.path); backup_db.pop(); backup_db.push("backup_db"); - println!("Path at {:?}", self.path); - println!("Backup at {:?}", backup_db); let existed = match fs::rename(&self.path, &backup_db) { Ok(_) => true, diff --git a/util/src/lib.rs b/util/src/lib.rs index 57ea9c152..17c2f5151 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -99,7 +99,7 @@ extern crate time; extern crate ethcore_devtools as devtools; extern crate libc; extern crate target_info; -extern crate bigint; +extern crate ethcore_bigint as bigint; extern crate parking_lot; extern crate ansi_term; extern crate tiny_keccak; diff --git a/util/src/trie/fatdb.rs b/util/src/trie/fatdb.rs index bb35bd467..f4c65a84b 100644 --- a/util/src/trie/fatdb.rs 
+++ b/util/src/trie/fatdb.rs @@ -46,8 +46,8 @@ impl<'db> FatDB<'db> { } impl<'db> Trie for FatDB<'db> { - fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(FatDBIterator::new(&self.raw)) + fn iter<'a>(&'a self) -> super::Result + 'a>> { + FatDBIterator::new(&self.raw).map(|iter| Box::new(iter) as Box<_>) } fn root(&self) -> &H256 { @@ -73,22 +73,24 @@ pub struct FatDBIterator<'db> { impl<'db> FatDBIterator<'db> { /// Creates new iterator. - pub fn new(trie: &'db TrieDB) -> Self { - FatDBIterator { - trie_iterator: TrieDBIterator::new(trie), + pub fn new(trie: &'db TrieDB) -> super::Result { + Ok(FatDBIterator { + trie_iterator: try!(TrieDBIterator::new(trie)), trie: trie, - } + }) } } impl<'db> Iterator for FatDBIterator<'db> { - type Item = (Vec, &'db [u8]); + type Item = TrieItem<'db>; fn next(&mut self) -> Option { self.trie_iterator.next() - .map(|(hash, value)| { - (self.trie.db().get_aux(&hash).expect("Missing fatdb hash"), value) - }) + .map(|res| + res.map(|(hash, value)| { + (self.trie.db().get_aux(&hash).expect("Missing fatdb hash"), value) + }) + ) } } @@ -105,5 +107,5 @@ fn fatdb_to_trie() { } let t = FatDB::new(&memdb, &root).unwrap(); assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), &[0x01u8, 0x23]); - assert_eq!(t.iter().collect::>(), vec![(vec![0x01u8, 0x23], &[0x01u8, 0x23] as &[u8])]); + assert_eq!(t.iter().unwrap().map(Result::unwrap).collect::>(), vec![(vec![0x01u8, 0x23], &[0x01u8, 0x23] as &[u8])]); } diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index b71f6a5e2..6eebd8f5d 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -72,12 +72,12 @@ impl fmt::Display for TrieError { } } -/// Trie-Item type. -pub type TrieItem<'a> = (Vec, &'a [u8]); - /// Trie result type. Boxed to avoid copying around extra space for `H256`s on successful queries. pub type Result = ::std::result::Result>; +/// Trie-Item type. 
+pub type TrieItem<'a> = Result<(Vec, &'a [u8])>; + /// A key-value datastore implemented as a database-backed modified Merkle tree. pub trait Trie { /// Return the root of the trie. @@ -102,7 +102,7 @@ pub trait Trie { where 'a: 'b, R: Recorder; /// Returns an iterator over elements of trie. - fn iter<'a>(&'a self) -> Box + 'a>; + fn iter<'a>(&'a self) -> Result + 'a>>; } /// A key-value datastore implemented as a database-backed modified Merkle tree. @@ -193,7 +193,7 @@ impl<'db> Trie for TrieKinds<'db> { wrapper!(self, get_recorded, key, r) } - fn iter<'a>(&'a self) -> Box + 'a> { + fn iter<'a>(&'a self) -> Result + 'a>> { wrapper!(self, iter,) } } diff --git a/util/src/trie/recorder.rs b/util/src/trie/recorder.rs index a48f277b4..2f1d926f0 100644 --- a/util/src/trie/recorder.rs +++ b/util/src/trie/recorder.rs @@ -63,6 +63,12 @@ pub struct BasicRecorder { min_depth: u32, } +impl Default for BasicRecorder { + fn default() -> Self { + BasicRecorder::new() + } +} + impl BasicRecorder { /// Create a new `BasicRecorder` which records all given nodes. #[inline] @@ -233,4 +239,4 @@ mod tests { ] ]); } -} \ No newline at end of file +} diff --git a/util/src/trie/sectriedb.rs b/util/src/trie/sectriedb.rs index 9e807884c..d7108dc3e 100644 --- a/util/src/trie/sectriedb.rs +++ b/util/src/trie/sectriedb.rs @@ -49,8 +49,8 @@ impl<'db> SecTrieDB<'db> { } impl<'db> Trie for SecTrieDB<'db> { - fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(TrieDB::iter(&self.raw)) + fn iter<'a>(&'a self) -> super::Result + 'a>> { + TrieDB::iter(&self.raw) } fn root(&self) -> &H256 { self.raw.root() } diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 88a3399e7..f5de26f8e 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -128,7 +128,7 @@ impl<'db> TrieDB<'db> { } /// Get the root node's RLP. 
- fn root_node<'a, R: 'a + Recorder>(&self, r: &'a mut R) -> super::Result { + fn root_node(&self, r: &mut R) -> super::Result { self.root_data(r).map(Node::decoded) } @@ -279,30 +279,38 @@ pub struct TrieDBIterator<'a> { impl<'a> TrieDBIterator<'a> { /// Create a new iterator. - pub fn new(db: &'a TrieDB) -> TrieDBIterator<'a> { + pub fn new(db: &'a TrieDB) -> super::Result> { let mut r = TrieDBIterator { db: db, trail: vec![], key_nibbles: Vec::new(), }; - r.descend(db.root_data(&mut NoOp).unwrap()); - r + + try!(db.root_data(&mut NoOp).and_then(|root| r.descend(root))); + Ok(r) } /// Descend into a payload. - fn descend(&mut self, d: &'a [u8]) { + fn descend(&mut self, d: &'a [u8]) -> super::Result<()> { self.trail.push(Crumb { status: Status::Entering, - node: self.db.get_node(d, &mut NoOp, 0).unwrap(), + node: try!(self.db.get_node(d, &mut NoOp, 0)), }); match self.trail.last().unwrap().node { Node::Leaf(n, _) | Node::Extension(n, _) => { self.key_nibbles.extend(n.iter()); }, _ => {} } + + Ok(()) } /// Descend into a payload and get the next item. - fn descend_next(&mut self, d: &'a [u8]) -> Option<(Bytes, &'a [u8])> { self.descend(d); self.next() } + fn descend_next(&mut self, d: &'a [u8]) -> Option> { + match self.descend(d) { + Ok(()) => self.next(), + Err(e) => Some(Err(e)), + } + } /// The present key. 
fn key(&self) -> Bytes { @@ -312,12 +320,12 @@ impl<'a> TrieDBIterator<'a> { } impl<'a> Iterator for TrieDBIterator<'a> { - type Item = (Bytes, &'a [u8]); + type Item = TrieItem<'a>; fn next(&mut self) -> Option { let b = match self.trail.last_mut() { Some(mut b) => { b.increment(); b.clone() }, - None => return None + None => return None, }; match (b.status, b.node) { (Status::Exiting, n) => { @@ -332,7 +340,7 @@ impl<'a> Iterator for TrieDBIterator<'a> { self.trail.pop(); self.next() }, - (Status::At, Node::Leaf(_, v)) | (Status::At, Node::Branch(_, Some(v))) => Some((self.key(), v)), + (Status::At, Node::Leaf(_, v)) | (Status::At, Node::Branch(_, Some(v))) => Some(Ok((self.key(), v))), (Status::At, Node::Extension(_, d)) => self.descend_next(d), (Status::At, Node::Branch(_, _)) => self.next(), (Status::AtChild(i), Node::Branch(children, _)) if children[i].len() > 0 => { @@ -352,8 +360,8 @@ impl<'a> Iterator for TrieDBIterator<'a> { } impl<'db> Trie for TrieDB<'db> { - fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(TrieDBIterator::new(self)) + fn iter<'a>(&'a self) -> super::Result + 'a>> { + TrieDBIterator::new(self).map(|iter| Box::new(iter) as Box<_>) } fn root(&self) -> &H256 { self.root } @@ -392,6 +400,6 @@ fn iterator() { } let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(d.iter().map(|i|i.to_vec()).collect::>(), t.iter().map(|x|x.0).collect::>()); - assert_eq!(d, t.iter().map(|x|x.1).collect::>()); + assert_eq!(d.iter().map(|i|i.to_vec()).collect::>(), t.iter().unwrap().map(|x| x.unwrap().0).collect::>()); + assert_eq!(d, t.iter().unwrap().map(|x| x.unwrap().1).collect::>()); } diff --git a/util/src/triehash.rs b/util/src/triehash.rs index f49b588d4..c8ab5bb08 100644 --- a/util/src/triehash.rs +++ b/util/src/triehash.rs @@ -40,7 +40,9 @@ use vector::SharedPrefix; /// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap()); /// } /// ``` -pub fn ordered_trie_root(input: Vec>) -> H256 { +pub fn ordered_trie_root(input: I) -> H256 + where 
I: IntoIterator> +{ let gen_input = input // first put elements into btree to sort them by nibbles // optimize it later