commit fd6900bbb3
Author: keorn
Date:   2016-09-27 12:37:43 +02:00

    Merge remote-tracking branch 'parity/master' into bft

    Conflicts:
    	Cargo.lock

168 changed files with 5255 additions and 2166 deletions

.gitlab-ci.yml

@@ -1,7 +1,6 @@
 stages:
   - build
   - test
-  - deploy
 variables:
   GIT_DEPTH: "3"
   SIMPLECOV: "true"
@@ -9,6 +8,48 @@ variables:
 cache:
   key: "$CI_BUILD_NAME/$CI_BUILD_REF_NAME"
   untracked: true
+linux-stable:
+  stage: build
+  image: ethcore/rust:stable
+  only:
+    - master
+    - beta
+    - tags
+    - stable
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity
+  tags:
+    - rust
+    - rust-stable
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "stable-x86_64-unknown-linux-gnu_parity"
+linux-stable-14.04:
+  stage: build
+  image: ethcore/rust-14.04:latest
+  only:
+    - master
+    - beta
+    - tags
+    - stable
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity
+  tags:
+    - rust
+    - rust-14.04
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "stable-x86_64-unknown-ubuntu_14_04-gnu_parity"
 linux-beta:
   stage: build
   image: ethcore/rust:beta
@@ -18,23 +59,16 @@ linux-beta:
     - tags
     - stable
   script:
-    - export
     - cargo build --release --verbose
     - strip target/release/parity
-    - cp target/release/parity parity
   tags:
     - rust
     - rust-beta
   artifacts:
     paths:
    - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "beta-x86_64-unknown-linux-gnu_parity"
-  stage: deploy
+  allow_failure: true
-  tags:
-    - rust
-    - rust-beta
-  script:
-    - ./deploy.sh
 linux-nightly:
   stage: build
   image: ethcore/rust:nightly
@@ -52,7 +86,7 @@ linux-nightly:
   artifacts:
     paths:
    - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "nigthly-x86_64-unknown-linux-gnu_parity"
   allow_failure: true
 linux-centos:
   stage: build
@@ -67,23 +101,25 @@ linux-centos:
     - export CC="gcc"
     - cargo build --release --verbose
    - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity
   tags:
     - rust
     - rust-centos
   artifacts:
     paths:
    - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "x86_64-unknown-centos-gnu_parity"
 linux-armv7:
   stage: build
-  image: ethcore/rust-arm:latest
+  image: ethcore/rust-armv7:latest
   only:
     - master
     - beta
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
@@ -91,13 +127,17 @@ linux-armv7:
     - cat .cargo/config
     - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
    - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
    - target/armv7-unknown-linux-gnueabihf/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "armv7_unknown_linux_gnueabihf_parity"
+  allow_failure: true
 linux-arm:
   stage: build
   image: ethcore/rust-arm:latest
@@ -107,7 +147,6 @@ linux-arm:
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
@@ -115,24 +154,26 @@ linux-arm:
     - cat .cargo/config
     - cargo build --target arm-unknown-linux-gnueabihf --release --verbose
    - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
    - target/arm-unknown-linux-gnueabihf/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "arm-unknown-linux-gnueabihf_parity"
   allow_failure: true
 linux-armv6:
   stage: build
-  image: ethcore/rust-arm:latest
+  image: ethcore/rust-armv6:latest
   only:
     - master
     - beta
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
@@ -140,24 +181,26 @@ linux-armv6:
     - cat .cargo/config
     - cargo build --target arm-unknown-linux-gnueabi --release --verbose
    - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
    - target/arm-unknown-linux-gnueabi/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "arm-unknown-linux-gnueabi_parity"
   allow_failure: true
 linux-aarch64:
   stage: build
-  image: ethcore/rust-arm:latest
+  image: ethcore/rust-aarch64:latest
   only:
     - master
     - beta
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
@@ -165,13 +208,16 @@ linux-aarch64:
     - cat .cargo/config
     - cargo build --target aarch64-unknown-linux-gnu --release --verbose
    - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
    - target/aarch64-unknown-linux-gnu/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "aarch64-unknown-linux-gnu_parity"
   allow_failure: true
 darwin:
   stage: build
@@ -182,12 +228,15 @@ darwin:
     - stable
   script:
    - cargo build --release --verbose
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity
   tags:
     - osx
   artifacts:
     paths:
    - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "x86_64-apple-darwin_parity"
 windows:
   stage: build
   only:
@@ -201,37 +250,24 @@ windows:
     - set RUST_BACKTRACE=1
     - rustup default stable-x86_64-pc-windows-msvc
    - cargo build --release --verbose
+    - aws configure set aws_access_key_id %s3_key%
+    - aws configure set aws_secret_access_key %s3_secret%
+    - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe
+    - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb
   tags:
     - rust-windows
   artifacts:
     paths:
     - target/release/parity.exe
    - target/release/parity.pdb
-    name: "${CI_BUILD_NAME}_parity"
+    name: "x86_64-pc-windows-msvc_parity"
-linux-stable:
-  stage: build
-  image: ethcore/rust:stable
-  only:
-    - master
-    - beta
-    - tags
-    - stable
-  script:
-    - export
-    - cargo build --release --verbose
-    - strip target/release/parity
-  tags:
-    - rust
-    - rust-stable
-  artifacts:
-    paths:
-    - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
 test-linux:
   stage: test
   before_script:
     - git submodule update --init --recursive
   script:
    - ./test.sh --verbose
+  tags:
+    - rust-test
   dependencies:
     - linux-stable
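Every release job now uploads its stripped binary straight to S3 under the key $CI_BUILD_REF_NAME/<target-triple>/parity, and the artifact names switch from the generic ${CI_BUILD_NAME}_parity to explicit <channel>-<target-triple>_parity strings. (Note that the "nigthly" misspelling in the linux-nightly artifact name survives into the new file.) A tiny sketch of the two naming conventions — illustrative only, not code from the repository:

fn artifact_name(channel: &str, triple: &str) -> String {
    format!("{}-{}_parity", channel, triple)
}

fn s3_key(build_ref: &str, triple: &str) -> String {
    format!("{}/{}/parity", build_ref, triple)
}

fn main() {
    assert_eq!(artifact_name("stable", "x86_64-unknown-linux-gnu"),
               "stable-x86_64-unknown-linux-gnu_parity");
    assert_eq!(s3_key("master", "armv7-unknown-linux-gnueabihf"),
               "master/armv7-unknown-linux-gnueabihf/parity");
}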

Cargo.lock (generated)

@@ -3,7 +3,7 @@ name = "parity"
 version = "1.4.0"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
  "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -37,7 +37,10 @@ dependencies = [
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -84,16 +87,6 @@ name = "base64"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "bigint"
-version = "0.1.0"
-dependencies = [
- "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
-]
 
 [[package]]
 name = "bit-set"
 version = "0.4.0"
@@ -147,15 +140,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "clippy"
-version = "0.0.85"
+version = "0.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "clippy_lints"
-version = "0.0.85"
+version = "0.0.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -277,7 +270,7 @@ version = "1.4.0"
 dependencies = [
  "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethash 1.4.0",
@@ -290,13 +283,14 @@ dependencies = [
  "ethjson 0.1.0",
  "ethkey 0.2.0",
  "ethstore 0.1.0",
+ "evmjit 1.4.0",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.1.0",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -304,18 +298,28 @@ dependencies = [
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+
+[[package]]
+name = "ethcore-bigint"
+version = "0.1.0"
+dependencies = [
+ "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
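The in-tree bigint crate reappears here as ethcore-bigint, the name it was published under on crates.io; ethcore-util, ethcrypto, ethkey and rlp all switch to the renamed package further down. A hypothetical downstream use site, assuming the crate keeps the old bigint module layout (unverified):

extern crate ethcore_bigint;

use ethcore_bigint::uint::U256;

fn main() {
    // Only the crate name changed; 256-bit arithmetic works as before.
    let x = U256::from(1_000u64) * U256::from(2u64);
    println!("{}", x);
}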
 
 [[package]]
 name = "ethcore-dapps"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-devtools 1.4.0",
  "ethcore-rpc 1.4.0",
  "ethcore-util 1.4.0",
  "https-fetch 0.1.0",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
  "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -390,6 +394,7 @@ name = "ethcore-ipc-nano"
 version = "1.4.0"
 dependencies = [
  "ethcore-ipc 1.4.0",
+ "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
 ]
@@ -449,19 +454,20 @@ dependencies = [
 name = "ethcore-rpc"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethash 1.4.0",
  "ethcore 1.4.0",
  "ethcore-devtools 1.4.0",
  "ethcore-io 1.4.0",
  "ethcore-ipc 1.4.0",
  "ethcore-util 1.4.0",
+ "ethcrypto 0.1.0",
  "ethjson 0.1.0",
  "ethkey 0.2.0",
  "ethstore 0.1.0",
  "ethsync 1.4.0",
  "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.1.0",
@@ -477,13 +483,13 @@ dependencies = [
 name = "ethcore-signer"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-devtools 1.4.0",
  "ethcore-io 1.4.0",
  "ethcore-rpc 1.4.0",
  "ethcore-util 1.4.0",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-dapps-signer 1.4.0 (git+https://github.com/ethcore/parity-ui.git)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -502,7 +508,7 @@ dependencies = [
  "ethcore-ipc-nano 1.4.0",
  "ethcore-util 1.4.0",
  "json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
@@ -515,11 +521,11 @@ version = "1.4.0"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
- "bigint 0.1.0",
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
+ "ethcore-bigint 0.1.0",
  "ethcore-devtools 1.4.0",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -546,8 +552,8 @@ dependencies = [
 name = "ethcrypto"
 version = "0.1.0"
 dependencies = [
- "bigint 0.1.0",
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
+ "ethcore-bigint 0.1.0",
  "ethkey 0.2.0",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -568,9 +574,9 @@ dependencies = [
 name = "ethkey"
 version = "0.2.0"
 dependencies = [
- "bigint 0.1.0",
  "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
+ "ethcore-bigint 0.1.0",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -601,7 +607,7 @@ dependencies = [
 name = "ethsync"
 version = "1.4.0"
 dependencies = [
- "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore 1.4.0",
  "ethcore-io 1.4.0",
@@ -619,6 +625,13 @@ dependencies = [
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+
+[[package]]
+name = "evmjit"
+version = "1.4.0"
+dependencies = [
+ "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "fdlimit"
 version = "0.1.0"
@@ -770,7 +783,7 @@ source = "git+https://github.com/ethcore/json-ipc-server.git#5fbd0253750d3097b9a
 dependencies = [
  "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -785,7 +798,7 @@ source = "git+https://github.com/ethcore/json-tcp-server#c2858522274ae56042472bb
 dependencies = [
  "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -794,7 +807,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-core"
-version = "3.0.0"
+version = "3.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -807,10 +820,10 @@ dependencies = [
 [[package]]
 name = "jsonrpc-http-server"
 version = "6.1.0"
-source = "git+https://github.com/ethcore/jsonrpc-http-server.git#339f7209b01d26aea01722b3a69127235287d6a9"
+source = "git+https://github.com/ethcore/jsonrpc-http-server.git#2766c6708f66f6f4e667211461d220b49c0d9fdf"
 dependencies = [
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
- "jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1305,7 +1318,7 @@ dependencies = [
 [[package]]
 name = "rayon"
-version = "0.3.1"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1343,8 +1356,8 @@ dependencies = [
 name = "rlp"
 version = "0.1.0"
 dependencies = [
- "bigint 0.1.0",
  "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ethcore-bigint 0.1.0",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1675,6 +1688,14 @@ dependencies = [
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+
+[[package]]
+name = "toml"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "traitobject"
 version = "0.0.1"
@@ -1853,8 +1874,8 @@ dependencies = [
 "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27"
 "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "<none>"
 "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
-"checksum clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "97f6d6efa6d7aec74d4eca1be62164b605d43b7fcb5256e9db0449f685130cba"
+"checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b"
-"checksum clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "dc96d3c877b63943b08ce3037c0ae8fd3bd5dead5fab11178b93afc71ca16031"
+"checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96"
 "checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245"
 "checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc"
 "checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "<none>"
@@ -1940,7 +1961,7 @@ dependencies = [
 "checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"
 "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a"
 "checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5"
-"checksum rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "941deb43a6254b9867fec1e0caeda38a2ad905ab18c57f7c68c396ca68998c07"
+"checksum rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "655df67c314c30fa3055a365eae276eb88aa4f3413a352a1ab32c1320eda41ea"
 "checksum regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)" = "b4329b8928a284580a1c63ec9d846b12f6d3472317243ff7077aff11f23f2b29"
 "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9"
 "checksum ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d059a6a96d3be79042e3f70eb97945912839265f9d8ab45b921abaf266c70dbb"
@@ -1982,6 +2003,7 @@ dependencies = [
 "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af"
 "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270"
 "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6"
+"checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72"
 "checksum traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07eaeb7689bb7fca7ce15628319635758eda769fed481ecfe6686ddef2600616"
 "checksum transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15f7cc7116182edca1ed08f6f8c4da92104555ca77addbabea4eaa59b20373d0"
 "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887"

Cargo.toml

@@ -25,6 +25,7 @@ ansi_term = "0.7"
 lazy_static = "0.2"
 regex = "0.1"
 isatty = "0.1"
+toml = "0.2"
 ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" }
 fdlimit = { path = "util/fdlimit" }
 ethcore = { path = "ethcore" }
@@ -41,8 +42,10 @@ ethcore-logger = { path = "logger" }
 rlp = { path = "util/rlp" }
 json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
 ethcore-dapps = { path = "dapps", optional = true }
-clippy = { version = "0.0.85", optional = true}
+clippy = { version = "0.0.90", optional = true}
 ethcore-stratum = { path = "stratum" }
+serde = "0.8.0"
+serde_json = "0.8.0"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"
@@ -60,11 +63,13 @@ ui = ["dapps", "ethcore-signer/ui"]
 use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"]
 dapps = ["ethcore-dapps"]
 ipc = ["ethcore/ipc"]
+jit = ["ethcore/jit"]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
 json-tests = ["ethcore/json-tests"]
 stratum = ["ipc"]
 ethkey-cli = ["ethcore/ethkey-cli"]
 ethstore-cli = ["ethcore/ethstore-cli"]
+evm-debug = ["ethcore/evm-debug"]
 
 [[bin]]
 path = "parity/main.rs"
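The new toml, serde and serde_json dependencies presumably back the TOML config-file handling merged in from master; note that toml 0.2 still decodes through rustc-serialize rather than serde. A minimal, hedged sketch of the toml 0.2 parsing API — illustrative only, not parity's actual config code, and the section/key names are made up:

extern crate toml;

fn main() {
    let text = r#"
        [parity]
        chain = "homestead"
    "#;

    // toml 0.2: Parser yields Option<Table>, a BTreeMap of String -> Value.
    let table = toml::Parser::new(text).parse().expect("valid TOML");
    let chain = table.get("parity")
        .and_then(|v| v.as_table())
        .and_then(|t| t.get("chain"))
        .and_then(|v| v.as_str())
        .unwrap_or("default");
    println!("chain = {}", chain);
}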

README.md

@@ -14,8 +14,8 @@ Be sure to check out [our wiki][wiki-url] for more information.
 [gitter-image]: https://badges.gitter.im/Join%20Chat.svg
 [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
 [license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg
-[license-url]: http://www.gnu.org/licenses/gpl-3.0.en.html
+[license-url]: https://www.gnu.org/licenses/gpl-3.0.en.html
-[doc-url]: http://ethcore.github.io/parity/ethcore/index.html
+[doc-url]: https://ethcore.github.io/parity/ethcore/index.html
 [wiki-url]: https://github.com/ethcore/parity/wiki
 
 ----

dapps/Cargo.toml

@@ -33,13 +33,13 @@ parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" }
 parity-dapps-home = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" }
 parity-dapps-wallet = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true }
 mime_guess = { version = "1.6.1" }
-clippy = { version = "0.0.85", optional = true}
+clippy = { version = "0.0.90", optional = true}
 
 [build-dependencies]
 serde_codegen = { version = "0.8", optional = true }
 
 [features]
-default = ["serde_codegen", "extra-dapps", "https-fetch/ca-github-only"]
+default = ["serde_codegen", "extra-dapps"]
 extra-dapps = ["parity-dapps-wallet"]
 nightly = ["serde_macros"]
 dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"]

dapps/src/api/api.rs

@@ -15,23 +15,26 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::sync::Arc;
-use hyper::{server, net, Decoder, Encoder, Next};
+use hyper::{server, net, Decoder, Encoder, Next, Control};
 use api::types::{App, ApiError};
 use api::response::{as_json, as_json_error, ping_response};
 use handlers::extract_url;
 use endpoint::{Endpoint, Endpoints, Handler, EndpointPath};
+use apps::fetcher::ContentFetcher;
 
 #[derive(Clone)]
 pub struct RestApi {
 	local_domain: String,
 	endpoints: Arc<Endpoints>,
+	fetcher: Arc<ContentFetcher>,
 }
 
 impl RestApi {
-	pub fn new(local_domain: String, endpoints: Arc<Endpoints>) -> Box<Endpoint> {
+	pub fn new(local_domain: String, endpoints: Arc<Endpoints>, fetcher: Arc<ContentFetcher>) -> Box<Endpoint> {
 		Box::new(RestApi {
 			local_domain: local_domain,
 			endpoints: endpoints,
+			fetcher: fetcher,
 		})
 	}
@@ -43,23 +46,42 @@ impl RestApi {
 	}
 }
 
 impl Endpoint for RestApi {
-	fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
-		Box::new(RestApiRouter {
-			api: self.clone(),
-			handler: as_json_error(&ApiError {
-				code: "404".into(),
-				title: "Not Found".into(),
-				detail: "Resource you requested has not been found.".into(),
-			}),
-		})
+	fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box<Handler> {
+		Box::new(RestApiRouter::new(self.clone(), path, control))
 	}
 }
 
 struct RestApiRouter {
 	api: RestApi,
+	path: Option<EndpointPath>,
+	control: Option<Control>,
 	handler: Box<Handler>,
 }
 
+impl RestApiRouter {
+	fn new(api: RestApi, path: EndpointPath, control: Control) -> Self {
+		RestApiRouter {
+			path: Some(path),
+			control: Some(control),
+			api: api,
+			handler: as_json_error(&ApiError {
+				code: "404".into(),
+				title: "Not Found".into(),
+				detail: "Resource you requested has not been found.".into(),
+			}),
+		}
+	}
+
+	fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option<Box<Handler>> {
+		match hash {
+			Some(hash) if self.api.fetcher.contains(hash) => {
+				Some(self.api.fetcher.to_async_handler(path, control))
+			},
+			_ => None
+		}
+	}
+}
+
 impl server::Handler<net::HttpStream> for RestApiRouter {
 	fn on_request(&mut self, request: server::Request<net::HttpStream>) -> Next {
@@ -69,13 +91,18 @@ impl server::Handler<net::HttpStream> for RestApiRouter {
 			return Next::write();
 		}
 
-		let url = url.expect("Check for None is above; qed");
+		let url = url.expect("Check for None early-exists above; qed");
+		let path = self.path.take().expect("on_request called only once, and path is always defined in new; qed");
+		let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed");
 		let endpoint = url.path.get(1).map(|v| v.as_str());
+		let hash = url.path.get(2).map(|v| v.as_str());
 
 		let handler = endpoint.and_then(|v| match v {
 			"apps" => Some(as_json(&self.api.list_apps())),
 			"ping" => Some(ping_response(&self.api.local_domain)),
-			_ => None,
+			"content" => self.resolve_content(hash, path, control),
+			_ => None
 		});
 
 		// Overwrite default
dapps/src/apps/cache.rs

@@ -18,13 +18,13 @@
 
 use std::fs;
 use std::sync::{Arc};
-use std::sync::atomic::{AtomicBool, Ordering};
 use linked_hash_map::LinkedHashMap;
 use page::LocalPageEndpoint;
+use handlers::FetchControl;
 
 pub enum ContentStatus {
-	Fetching(Arc<AtomicBool>),
+	Fetching(Arc<FetchControl>),
 	Ready(LocalPageEndpoint),
 }
@@ -57,10 +57,10 @@ impl ContentCache {
 		while len > expected_size {
 			let entry = self.cache.pop_front().unwrap();
 			match entry.1 {
-				ContentStatus::Fetching(ref abort) => {
+				ContentStatus::Fetching(ref fetch) => {
 					trace!(target: "dapps", "Aborting {} because of limit.", entry.0);
 					// Mark as aborted
-					abort.store(true, Ordering::SeqCst);
+					fetch.abort()
 				},
 				ContentStatus::Ready(ref endpoint) => {
 					trace!(target: "dapps", "Removing {} because of limit.", entry.0);
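FetchControl from the handlers module replaces the bare Arc<AtomicBool>: the same abort flag, but behind a named method instead of a raw store(). A hypothetical reconstruction of the minimal surface this file relies on — the real type presumably also carries the handler wake-up plumbing used elsewhere:

use std::sync::atomic::{AtomicBool, Ordering};

pub struct FetchControl {
    abort: AtomicBool,
}

impl FetchControl {
    pub fn new() -> Self {
        FetchControl { abort: AtomicBool::new(false) }
    }

    // What `fetch.abort()` in clear_garbage boils down to:
    // flip the shared flag; the download task checks it and stops.
    pub fn abort(&self) {
        self.abort.store(true, Ordering::SeqCst);
    }

    pub fn is_aborted(&self) -> bool {
        self.abort.load(Ordering::SeqCst)
    }
}

fn main() {
    let control = FetchControl::new();
    control.abort();
    assert!(control.is_aborted());
}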

dapps/src/apps/fetcher.rs

@@ -23,7 +23,6 @@ use std::{fs, env, fmt};
 use std::io::{self, Read, Write};
 use std::path::PathBuf;
 use std::sync::Arc;
-use std::sync::atomic::{AtomicBool};
 use rustc_serialize::hex::FromHex;
 
 use hyper;
@@ -38,65 +37,67 @@ use handlers::{ContentHandler, ContentFetcherHandler, ContentValidator};
 use endpoint::{Endpoint, EndpointPath, Handler};
 use apps::cache::{ContentCache, ContentStatus};
 use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest};
-use apps::urlhint::{URLHintContract, URLHint};
+use apps::urlhint::{URLHintContract, URLHint, URLHintResult};
 
 const MAX_CACHED_DAPPS: usize = 10;
 
-pub struct AppFetcher<R: URLHint = URLHintContract> {
+pub struct ContentFetcher<R: URLHint = URLHintContract> {
 	dapps_path: PathBuf,
 	resolver: R,
+	cache: Arc<Mutex<ContentCache>>,
 	sync: Arc<SyncStatus>,
-	dapps: Arc<Mutex<ContentCache>>,
 }
 
-impl<R: URLHint> Drop for AppFetcher<R> {
+impl<R: URLHint> Drop for ContentFetcher<R> {
 	fn drop(&mut self) {
 		// Clear cache path
 		let _ = fs::remove_dir_all(&self.dapps_path);
 	}
 }
 
-impl<R: URLHint> AppFetcher<R> {
+impl<R: URLHint> ContentFetcher<R> {
 	pub fn new(resolver: R, sync_status: Arc<SyncStatus>) -> Self {
 		let mut dapps_path = env::temp_dir();
 		dapps_path.push(random_filename());
 
-		AppFetcher {
+		ContentFetcher {
 			dapps_path: dapps_path,
 			resolver: resolver,
 			sync: sync_status,
-			dapps: Arc::new(Mutex::new(ContentCache::default())),
+			cache: Arc::new(Mutex::new(ContentCache::default())),
 		}
 	}
 
 	#[cfg(test)]
-	fn set_status(&self, app_id: &str, status: ContentStatus) {
-		self.dapps.lock().insert(app_id.to_owned(), status);
+	fn set_status(&self, content_id: &str, status: ContentStatus) {
+		self.cache.lock().insert(content_id.to_owned(), status);
 	}
 
-	pub fn contains(&self, app_id: &str) -> bool {
-		let mut dapps = self.dapps.lock();
-		// Check if we already have the app
-		if dapps.get(app_id).is_some() {
-			return true;
-		}
+	pub fn contains(&self, content_id: &str) -> bool {
+		{
+			let mut cache = self.cache.lock();
+			// Check if we already have the app
+			if cache.get(content_id).is_some() {
+				return true;
+			}
+		}
 		// fallback to resolver
-		if let Ok(app_id) = app_id.from_hex() {
+		if let Ok(content_id) = content_id.from_hex() {
 			// if app_id is valid, but we are syncing always return true.
 			if self.sync.is_major_syncing() {
 				return true;
 			}
 			// else try to resolve the app_id
-			self.resolver.resolve(app_id).is_some()
+			self.resolver.resolve(content_id).is_some()
 		} else {
 			false
 		}
 	}
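Note the new block around the cache check in contains: it drops the MutexGuard before the resolver call, so a slow registry lookup no longer holds the cache lock. The pattern in isolation, with stand-in types:

use std::sync::Mutex;

// Stand-in for the registry resolver, which may block for a while.
fn slow_resolve(_id: &str) -> bool {
    false
}

fn contains(cache: &Mutex<Vec<String>>, id: &str) -> bool {
    {
        let cache = cache.lock().unwrap();
        if cache.iter().any(|cached| cached == id) {
            return true;
        }
    } // guard dropped here, before the potentially slow call
    slow_resolve(id)
}

fn main() {
    let cache = Mutex::new(vec!["abc".to_string()]);
    assert!(contains(&cache, "abc"));
    assert!(!contains(&cache, "def"));
}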
 
 	pub fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler> {
-		let mut dapps = self.dapps.lock();
-		let app_id = path.app_id.clone();
+		let mut cache = self.cache.lock();
+		let content_id = path.app_id.clone();
 
 		if self.sync.is_major_syncing() {
 			return Box::new(ContentHandler::error(
@@ -108,41 +109,68 @@ impl<R: URLHint> AppFetcher<R> {
 		}
 
 		let (new_status, handler) = {
-			let status = dapps.get(&app_id);
+			let status = cache.get(&content_id);
 			match status {
 				// Just server dapp
 				Some(&mut ContentStatus::Ready(ref endpoint)) => {
 					(None, endpoint.to_async_handler(path, control))
 				},
 				// App is already being fetched
-				Some(&mut ContentStatus::Fetching(_)) => {
-					(None, Box::new(ContentHandler::error_with_refresh(
-						StatusCode::ServiceUnavailable,
-						"Download In Progress",
-						"This dapp is already being downloaded. Please wait...",
-						None,
-					)) as Box<Handler>)
+				Some(&mut ContentStatus::Fetching(ref fetch_control)) => {
+					trace!(target: "dapps", "Content fetching in progress. Waiting...");
+					(None, fetch_control.to_handler(control))
 				},
 				// We need to start fetching app
 				None => {
-					let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true.");
-					let app = self.resolver.resolve(app_hex);
-
-					if let Some(app) = app {
-						let abort = Arc::new(AtomicBool::new(false));
-
-						(Some(ContentStatus::Fetching(abort.clone())), Box::new(ContentFetcherHandler::new(
-							app,
-							abort,
-							control,
-							path.using_dapps_domains,
-							DappInstaller {
-								dapp_id: app_id.clone(),
-								dapps_path: self.dapps_path.clone(),
-								dapps: self.dapps.clone(),
-							}
-						)) as Box<Handler>)
-					} else {
+					trace!(target: "dapps", "Content unavailable. Fetching...");
+					let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true.");
+					let content = self.resolver.resolve(content_hex);
+
+					let cache = self.cache.clone();
+					let on_done = move |id: String, result: Option<LocalPageEndpoint>| {
+						let mut cache = cache.lock();
+						match result {
+							Some(endpoint) => {
+								cache.insert(id, ContentStatus::Ready(endpoint));
+							},
+							// In case of error
+							None => {
+								cache.remove(&id);
+							},
+						}
+					};
+
+					match content {
+						Some(URLHintResult::Dapp(dapp)) => {
+							let (handler, fetch_control) = ContentFetcherHandler::new(
+								dapp.url(),
+								control,
+								path.using_dapps_domains,
+								DappInstaller {
+									id: content_id.clone(),
+									dapps_path: self.dapps_path.clone(),
+									on_done: Box::new(on_done),
+								}
+							);
+
+							(Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box<Handler>)
+						},
+						Some(URLHintResult::Content(content)) => {
+							let (handler, fetch_control) = ContentFetcherHandler::new(
+								content.url,
+								control,
+								path.using_dapps_domains,
+								ContentInstaller {
+									id: content_id.clone(),
+									mime: content.mime,
+									content_path: self.dapps_path.clone(),
+									on_done: Box::new(on_done),
+								}
+							);
+
+							(Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box<Handler>)
+						},
+						None => {
 						// This may happen when sync status changes in between
 						// `contains` and `to_handler`
 						(None, Box::new(ContentHandler::error(
@@ -151,14 +179,15 @@ impl<R: URLHint> AppFetcher<R> {
 							"Requested resource was not found.",
 							None
 						)) as Box<Handler>)
+						},
 					}
 				},
 			}
 		};
 
 		if let Some(status) = new_status {
-			dapps.clear_garbage(MAX_CACHED_DAPPS);
-			dapps.insert(app_id, status);
+			cache.clear_garbage(MAX_CACHED_DAPPS);
+			cache.insert(content_id, status);
 		}
 
 		handler
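to_async_handler now branches on what the resolver returned: a Dapp entry is a zip bundle to unpack, a Content entry a single file with an explicit MIME type. The enum's implied shape, reconstructed from the use sites above (field and method names as they appear there; everything else is an assumption):

pub struct Dapp {
    url: String,
}

impl Dapp {
    pub fn url(&self) -> String {
        self.url.clone()
    }
}

pub struct Content {
    pub url: String,
    pub mime: String,
}

// Matched above as URLHintResult::Dapp(..) and URLHintResult::Content(..).
pub enum URLHintResult {
    Dapp(Dapp),
    Content(Content),
}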
@@ -169,7 +198,7 @@ impl<R: URLHint> AppFetcher<R> {
 pub enum ValidationError {
 	Io(io::Error),
 	Zip(zip::result::ZipError),
-	InvalidDappId,
+	InvalidContentId,
 	ManifestNotFound,
 	ManifestSerialization(String),
 	HashMismatch { expected: H256, got: H256, },
@@ -180,7 +209,7 @@ impl fmt::Display for ValidationError {
 		match *self {
 			ValidationError::Io(ref io) => write!(f, "Unexpected IO error occured: {:?}", io),
 			ValidationError::Zip(ref zip) => write!(f, "Unable to read ZIP archive: {:?}", zip),
-			ValidationError::InvalidDappId => write!(f, "Dapp ID is invalid. It should be 32 bytes hash of content."),
+			ValidationError::InvalidContentId => write!(f, "ID is invalid. It should be 256 bits keccak hash of content."),
 			ValidationError::ManifestNotFound => write!(f, "Downloaded Dapp bundle did not contain valid manifest.json file."),
 			ValidationError::ManifestSerialization(ref err) => {
 				write!(f, "There was an error during Dapp Manifest serialization: {:?}", err)
@@ -204,10 +233,44 @@ impl From<zip::result::ZipError> for ValidationError {
 	}
 }
 
+struct ContentInstaller {
+	id: String,
+	mime: String,
+	content_path: PathBuf,
+	on_done: Box<Fn(String, Option<LocalPageEndpoint>) + Send>,
+}
+
+impl ContentValidator for ContentInstaller {
+	type Error = ValidationError;
+
+	fn validate_and_install(&self, path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> {
+		// Create dir
+		try!(fs::create_dir_all(&self.content_path));
+
+		// And prepare path for a file
+		let filename = path.file_name().expect("We always fetch a file.");
+		let mut content_path = self.content_path.clone();
+		content_path.push(&filename);
+
+		if content_path.exists() {
+			try!(fs::remove_dir_all(&content_path))
+		}
+
+		try!(fs::copy(&path, &content_path));
+
+		Ok((self.id.clone(), LocalPageEndpoint::single_file(content_path, self.mime.clone())))
+	}
+
+	fn done(&self, endpoint: Option<LocalPageEndpoint>) {
+		(self.on_done)(self.id.clone(), endpoint)
+	}
+}
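Both installers now report back through the boxed on_done callback instead of holding Arc<Mutex<ContentCache>> themselves; the single closure built in to_async_handler does the cache bookkeeping for success and failure alike. The pattern in miniature, with String standing in for LocalPageEndpoint:

type OnDone = Box<dyn Fn(String, Option<String>) + Send>;

struct Installer {
    id: String,
    on_done: OnDone,
}

impl Installer {
    fn finish(&self, endpoint: Option<String>) {
        // The installer neither knows nor cares how completion is recorded.
        (self.on_done)(self.id.clone(), endpoint)
    }
}

fn main() {
    let installer = Installer {
        id: "abc".into(),
        on_done: Box::new(|id, result| match result {
            Some(endpoint) => println!("ready: {} -> {}", id, endpoint),
            None => println!("failed: {} evicted from cache", id),
        }),
    };
    installer.finish(Some("/tmp/content".into()));
}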
struct DappInstaller { struct DappInstaller {
dapp_id: String, id: String,
dapps_path: PathBuf, dapps_path: PathBuf,
dapps: Arc<Mutex<ContentCache>>, on_done: Box<Fn(String, Option<LocalPageEndpoint>) + Send>,
} }
impl DappInstaller { impl DappInstaller {
@ -245,14 +308,14 @@ impl DappInstaller {
impl ContentValidator for DappInstaller { impl ContentValidator for DappInstaller {
type Error = ValidationError; type Error = ValidationError;
fn validate_and_install(&self, app_path: PathBuf) -> Result<Manifest, ValidationError> { fn validate_and_install(&self, app_path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> {
trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path); trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path);
let mut file_reader = io::BufReader::new(try!(fs::File::open(app_path))); let mut file_reader = io::BufReader::new(try!(fs::File::open(app_path)));
let hash = try!(sha3(&mut file_reader)); let hash = try!(sha3(&mut file_reader));
let dapp_id = try!(self.dapp_id.as_str().parse().map_err(|_| ValidationError::InvalidDappId)); let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId));
if dapp_id != hash { if id != hash {
return Err(ValidationError::HashMismatch { return Err(ValidationError::HashMismatch {
expected: dapp_id, expected: id,
got: hash, got: hash,
}); });
} }
@@ -262,7 +325,7 @@ impl ContentValidator for DappInstaller {
// First find manifest file // First find manifest file
let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip)); let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip));
// Overwrite id to match hash // Overwrite id to match hash
manifest.id = self.dapp_id.clone(); manifest.id = self.id.clone();
let target = self.dapp_target_path(&manifest); let target = self.dapp_target_path(&manifest);
@@ -299,23 +362,15 @@ impl ContentValidator for DappInstaller {
let mut manifest_file = try!(fs::File::create(manifest_path)); let mut manifest_file = try!(fs::File::create(manifest_path));
try!(manifest_file.write_all(manifest_str.as_bytes())); try!(manifest_file.write_all(manifest_str.as_bytes()));
// Create endpoint
let app = LocalPageEndpoint::new(target, manifest.clone().into());
// Return modified app manifest // Return modified app manifest
Ok(manifest) Ok((manifest.id.clone(), app))
} }
fn done(&self, manifest: Option<&Manifest>) { fn done(&self, endpoint: Option<LocalPageEndpoint>) {
let mut dapps = self.dapps.lock(); (self.on_done)(self.id.clone(), endpoint)
match manifest {
Some(manifest) => {
let path = self.dapp_target_path(manifest);
let app = LocalPageEndpoint::new(path, manifest.clone().into());
dapps.insert(self.dapp_id.clone(), ContentStatus::Ready(app));
},
// In case of error
None => {
dapps.remove(&self.dapp_id);
},
}
} }
} }
@@ -327,12 +382,12 @@ mod tests {
use endpoint::EndpointInfo; use endpoint::EndpointInfo;
use page::LocalPageEndpoint; use page::LocalPageEndpoint;
use apps::cache::ContentStatus; use apps::cache::ContentStatus;
use apps::urlhint::{GithubApp, URLHint}; use apps::urlhint::{URLHint, URLHintResult};
use super::AppFetcher; use super::ContentFetcher;
struct FakeResolver; struct FakeResolver;
impl URLHint for FakeResolver { impl URLHint for FakeResolver {
fn resolve(&self, _app_id: Bytes) -> Option<GithubApp> { fn resolve(&self, _id: Bytes) -> Option<URLHintResult> {
None None
} }
} }
@@ -341,7 +396,7 @@ mod tests {
fn should_true_if_contains_the_app() { fn should_true_if_contains_the_app() {
// given // given
let path = env::temp_dir(); let path = env::temp_dir();
let fetcher = AppFetcher::new(FakeResolver, Arc::new(|| false)); let fetcher = ContentFetcher::new(FakeResolver, Arc::new(|| false));
let handler = LocalPageEndpoint::new(path, EndpointInfo { let handler = LocalPageEndpoint::new(path, EndpointInfo {
name: "fake".into(), name: "fake".into(),
description: "".into(), description: "".into(),


@@ -17,6 +17,7 @@
use std::fmt; use std::fmt;
use std::sync::Arc; use std::sync::Arc;
use rustc_serialize::hex::ToHex; use rustc_serialize::hex::ToHex;
use mime_guess;
use ethabi::{Interface, Contract, Token}; use ethabi::{Interface, Contract, Token};
use util::{Address, Bytes, Hashable}; use util::{Address, Bytes, Hashable};
@@ -52,6 +53,13 @@ impl GithubApp {
} }
} }
#[derive(Debug, PartialEq)]
pub struct Content {
pub url: String,
pub mime: String,
pub owner: Address,
}
/// RAW Contract interface. /// RAW Contract interface.
/// Should execute transaction using current blockchain state. /// Should execute transaction using current blockchain state.
pub trait ContractClient: Send + Sync { pub trait ContractClient: Send + Sync {
@@ -61,10 +69,19 @@ pub trait ContractClient: Send + Sync {
fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String>; fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String>;
} }
/// Result of resolving id to URL
#[derive(Debug, PartialEq)]
pub enum URLHintResult {
/// Dapp
Dapp(GithubApp),
/// Content
Content(Content),
}
/// URLHint Contract interface /// URLHint Contract interface
pub trait URLHint { pub trait URLHint {
/// Resolves given id to registrar entry. /// Resolves given id to registrar entry.
fn resolve(&self, app_id: Bytes) -> Option<GithubApp>; fn resolve(&self, id: Bytes) -> Option<URLHintResult>;
} }
pub struct URLHintContract { pub struct URLHintContract {
@@ -110,10 +127,10 @@ impl URLHintContract {
} }
} }
fn encode_urlhint_call(&self, app_id: Bytes) -> Option<Bytes> { fn encode_urlhint_call(&self, id: Bytes) -> Option<Bytes> {
let call = self.urlhint let call = self.urlhint
.function("entries".into()) .function("entries".into())
.and_then(|f| f.encode_call(vec![Token::FixedBytes(app_id)])); .and_then(|f| f.encode_call(vec![Token::FixedBytes(id)]));
match call { match call {
Ok(res) => { Ok(res) => {
@@ -126,7 +143,7 @@ impl URLHintContract {
} }
} }
fn decode_urlhint_output(&self, output: Bytes) -> Option<GithubApp> { fn decode_urlhint_output(&self, output: Bytes) -> Option<URLHintResult> {
trace!(target: "dapps", "Output: {:?}", output.to_hex()); trace!(target: "dapps", "Output: {:?}", output.to_hex());
let output = self.urlhint let output = self.urlhint
.function("entries".into()) .function("entries".into())
@@ -149,6 +166,17 @@ impl URLHintContract {
if owner == Address::default() { if owner == Address::default() {
return None; return None;
} }
let commit = GithubApp::commit(&commit);
if commit == Some(Default::default()) {
let mime = guess_mime_type(&account_slash_repo).unwrap_or("application/octet-stream".into());
return Some(URLHintResult::Content(Content {
url: account_slash_repo,
mime: mime,
owner: owner,
}));
}
let (account, repo) = { let (account, repo) = {
let mut it = account_slash_repo.split('/'); let mut it = account_slash_repo.split('/');
match (it.next(), it.next()) { match (it.next(), it.next()) {
@@ -157,12 +185,12 @@ impl URLHintContract {
} }
}; };
GithubApp::commit(&commit).map(|commit| GithubApp { commit.map(|commit| URLHintResult::Dapp(GithubApp {
account: account, account: account,
repo: repo, repo: repo,
commit: commit, commit: commit,
owner: owner, owner: owner,
}) }))
}, },
e => { e => {
warn!(target: "dapps", "Invalid contract output parameters: {:?}", e); warn!(target: "dapps", "Invalid contract output parameters: {:?}", e);
@@ -177,10 +205,10 @@ impl URLHintContract {
} }
impl URLHint for URLHintContract { impl URLHint for URLHintContract {
fn resolve(&self, app_id: Bytes) -> Option<GithubApp> { fn resolve(&self, id: Bytes) -> Option<URLHintResult> {
self.urlhint_address().and_then(|address| { self.urlhint_address().and_then(|address| {
// Prepare contract call // Prepare contract call
self.encode_urlhint_call(app_id) self.encode_urlhint_call(id)
.and_then(|data| { .and_then(|data| {
let call = self.client.call(address, data); let call = self.client.call(address, data);
if let Err(ref e) = call { if let Err(ref e) = call {
@@ -193,6 +221,34 @@ impl URLHint for URLHintContract {
} }
} }
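A hedged sketch of what callers can now do with the widened resolve() result; GithubApp::url() is the existing bundle-URL helper used by the fetch handler later in this diff:

fn describe(hint: &URLHint, id: Bytes) -> String {
	match hint.resolve(id) {
		Some(URLHintResult::Dapp(app)) => format!("dapp bundle at {}", app.url()),
		Some(URLHintResult::Content(content)) => format!("single file at {} ({})", content.url, content.mime),
		None => "no registrar entry".into(),
	}
}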
fn guess_mime_type(url: &str) -> Option<String> {
const CONTENT_TYPE: &'static str = "content-type=";
let mut it = url.split('#');
// skip url
let url = it.next();
// get meta headers
let metas = it.next();
if let Some(metas) = metas {
for meta in metas.split('&') {
let meta = meta.to_lowercase();
if meta.starts_with(CONTENT_TYPE) {
return Some(meta[CONTENT_TYPE.len()..].to_owned());
}
}
}
url.and_then(|url| {
url.split('.').last()
}).and_then(|extension| {
mime_guess::get_mime_type_str(extension).map(Into::into)
})
}
#[cfg(test)]
pub fn test_guess_mime_type(url: &str) -> Option<String> {
guess_mime_type(url)
}
fn as_string<T: fmt::Debug>(e: T) -> String { fn as_string<T: fmt::Debug>(e: T) -> String {
format!("{:?}", e) format!("{:?}", e)
} }
@@ -201,7 +257,7 @@ fn as_string<T: fmt::Debug>(e: T) -> String {
mod tests { mod tests {
use std::sync::Arc; use std::sync::Arc;
use std::str::FromStr; use std::str::FromStr;
use rustc_serialize::hex::{ToHex, FromHex}; use rustc_serialize::hex::FromHex;
use super::*; use super::*;
use util::{Bytes, Address, Mutex, ToPretty}; use util::{Bytes, Address, Mutex, ToPretty};
@@ -279,12 +335,33 @@ mod tests {
let res = urlhint.resolve("test".bytes().collect()); let res = urlhint.resolve("test".bytes().collect());
// then // then
assert_eq!(res, Some(GithubApp { assert_eq!(res, Some(URLHintResult::Dapp(GithubApp {
account: "ethcore".into(), account: "ethcore".into(),
repo: "dao.claim".into(), repo: "dao.claim".into(),
commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(), commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(),
owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(),
})) })))
}
#[test]
fn should_decode_urlhint_content_output() {
// given
let mut registrar = FakeRegistrar::new();
registrar.responses = Mutex::new(vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003d68747470733a2f2f657468636f72652e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e67000000".from_hex().unwrap()),
]);
let urlhint = URLHintContract::new(Arc::new(registrar));
// when
let res = urlhint.resolve("test".bytes().collect());
// then
assert_eq!(res, Some(URLHintResult::Content(Content {
url: "https://ethcore.io/assets/images/ethcore-black-horizontal.png".into(),
mime: "image/png".into(),
owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(),
})))
} }
#[test] #[test]
@@ -303,4 +380,20 @@ mod tests {
// then // then
assert_eq!(url, "https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213".to_owned()); assert_eq!(url, "https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213".to_owned());
} }
#[test]
fn should_guess_mime_type_from_url() {
let url1 = "https://ethcore.io/parity";
let url2 = "https://ethcore.io/parity#content-type=image/png";
let url3 = "https://ethcore.io/parity#something&content-type=image/png";
let url4 = "https://ethcore.io/parity.png#content-type=image/jpeg";
let url5 = "https://ethcore.io/parity.png";
assert_eq!(test_guess_mime_type(url1), None);
assert_eq!(test_guess_mime_type(url2), Some("image/png".into()));
assert_eq!(test_guess_mime_type(url3), Some("image/png".into()));
assert_eq!(test_guess_mime_type(url4), Some("image/jpeg".into()));
assert_eq!(test_guess_mime_type(url5), Some("image/png".into()));
}
} }


@@ -42,7 +42,9 @@ pub type Handler = server::Handler<net::HttpStream> + Send;
pub trait Endpoint : Send + Sync { pub trait Endpoint : Send + Sync {
fn info(&self) -> Option<&EndpointInfo> { None } fn info(&self) -> Option<&EndpointInfo> { None }
fn to_handler(&self, path: EndpointPath) -> Box<Handler>; fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
panic!("This Endpoint is asynchronous and requires Control object.");
}
fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box<Handler> { fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box<Handler> {
self.to_handler(path) self.to_handler(path)
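With the panicking default above, purely asynchronous endpoints (such as the RPC endpoint later in this diff) only override to_async_handler. A sketch, with MyAsyncEndpoint as a stand-in name:

struct MyAsyncEndpoint;

impl Endpoint for MyAsyncEndpoint {
	// to_handler() is inherited and panics: this endpoint cannot run without a Control.
	fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box<Handler> {
		// hand `control` to a handler that wakes the event loop once its result is ready
		unimplemented!()
	}
}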


@@ -3,7 +3,6 @@
<head> <head>
<meta charset="utf-8"> <meta charset="utf-8">
<meta name="viewport" content="width=device-width"> <meta name="viewport" content="width=device-width">
{meta}
<title>{title}</title> <title>{title}</title>
<link rel="stylesheet" href="/parity-utils/styles.css"> <link rel="stylesheet" href="/parity-utils/styles.css">
</head> </head>


@@ -63,7 +63,7 @@ impl Client {
self.https_client.close(); self.https_client.close();
} }
pub fn request(&mut self, url: String, abort: Arc<AtomicBool>, on_done: Box<Fn() + Send>) -> Result<mpsc::Receiver<FetchResult>, FetchError> { pub fn request(&mut self, url: &str, abort: Arc<AtomicBool>, on_done: Box<Fn() + Send>) -> Result<mpsc::Receiver<FetchResult>, FetchError> {
let is_https = url.starts_with("https://"); let is_https = url.starts_with("https://");
let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl)); let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl));
trace!(target: "dapps", "Fetching from: {:?}", url); trace!(target: "dapps", "Fetching from: {:?}", url);


@@ -23,6 +23,7 @@ use hyper::status::StatusCode;
use util::version; use util::version;
#[derive(Clone)]
pub struct ContentHandler { pub struct ContentHandler {
code: StatusCode, code: StatusCode,
content: String, content: String,
@@ -57,18 +58,6 @@ impl ContentHandler {
Self::html(code, format!( Self::html(code, format!(
include_str!("../error_tpl.html"), include_str!("../error_tpl.html"),
title=title, title=title,
meta="",
message=message,
details=details.unwrap_or_else(|| ""),
version=version(),
))
}
pub fn error_with_refresh(code: StatusCode, title: &str, message: &str, details: Option<&str>) -> Self {
Self::html(code, format!(
include_str!("../error_tpl.html"),
title=title,
meta="<meta http-equiv=\"refresh\" content=\"1\">",
message=message, message=message,
details=details.unwrap_or_else(|| ""), details=details.unwrap_or_else(|| ""),
version=version(), version=version(),


@@ -16,78 +16,160 @@
//! Hyper Server Handler that fetches a file during a request (proxy). //! Hyper Server Handler that fetches a file during a request (proxy).
use std::fmt; use std::{fs, fmt};
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{mpsc, Arc}; use std::sync::{mpsc, Arc};
use std::sync::atomic::AtomicBool; use std::sync::atomic::{AtomicBool, Ordering};
use std::time::{Instant, Duration}; use std::time::{Instant, Duration};
use util::Mutex;
use hyper::{header, server, Decoder, Encoder, Next, Method, Control}; use hyper::{server, Decoder, Encoder, Next, Method, Control};
use hyper::net::HttpStream; use hyper::net::HttpStream;
use hyper::status::StatusCode; use hyper::status::StatusCode;
use handlers::ContentHandler; use handlers::{ContentHandler, Redirection};
use handlers::client::{Client, FetchResult}; use handlers::client::{Client, FetchResult};
use apps::redirection_address; use apps::redirection_address;
use apps::urlhint::GithubApp; use page::LocalPageEndpoint;
use apps::manifest::Manifest;
const FETCH_TIMEOUT: u64 = 30; const FETCH_TIMEOUT: u64 = 30;
enum FetchState { enum FetchState {
NotStarted(GithubApp), NotStarted(String),
Error(ContentHandler), Error(ContentHandler),
InProgress { InProgress(mpsc::Receiver<FetchResult>),
deadline: Instant, Done(String, LocalPageEndpoint, Redirection),
receiver: mpsc::Receiver<FetchResult>,
},
Done(Manifest),
} }
pub trait ContentValidator { pub trait ContentValidator {
type Error: fmt::Debug + fmt::Display; type Error: fmt::Debug + fmt::Display;
fn validate_and_install(&self, app: PathBuf) -> Result<Manifest, Self::Error>; fn validate_and_install(&self, app: PathBuf) -> Result<(String, LocalPageEndpoint), Self::Error>;
fn done(&self, Option<&Manifest>); fn done(&self, Option<LocalPageEndpoint>);
}
pub struct FetchControl {
abort: Arc<AtomicBool>,
listeners: Mutex<Vec<(Control, mpsc::Sender<FetchState>)>>,
deadline: Instant,
}
impl Default for FetchControl {
fn default() -> Self {
FetchControl {
abort: Arc::new(AtomicBool::new(false)),
listeners: Mutex::new(Vec::new()),
deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT),
}
}
}
impl FetchControl {
fn notify<F: Fn() -> FetchState>(&self, status: F) {
let mut listeners = self.listeners.lock();
for (control, sender) in listeners.drain(..) {
if let Err(e) = sender.send(status()) {
trace!(target: "dapps", "Waiting listener notification failed: {:?}", e);
} else {
let _ = control.ready(Next::read());
}
}
}
fn set_status(&self, status: &FetchState) {
match *status {
FetchState::Error(ref handler) => self.notify(|| FetchState::Error(handler.clone())),
FetchState::Done(ref id, ref endpoint, ref handler) => self.notify(|| FetchState::Done(id.clone(), endpoint.clone(), handler.clone())),
FetchState::NotStarted(_) | FetchState::InProgress(_) => {},
}
}
pub fn abort(&self) {
self.abort.store(true, Ordering::SeqCst);
}
pub fn to_handler(&self, control: Control) -> Box<server::Handler<HttpStream> + Send> {
let (tx, rx) = mpsc::channel();
self.listeners.lock().push((control, tx));
Box::new(WaitingHandler {
receiver: rx,
state: None,
})
}
}
pub struct WaitingHandler {
receiver: mpsc::Receiver<FetchState>,
state: Option<FetchState>,
}
impl server::Handler<HttpStream> for WaitingHandler {
fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
Next::wait()
}
fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
self.state = self.receiver.try_recv().ok();
Next::write()
}
fn on_response(&mut self, res: &mut server::Response) -> Next {
match self.state {
Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response(res),
Some(FetchState::Error(ref mut handler)) => handler.on_response(res),
_ => Next::end(),
}
}
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
match self.state {
Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response_writable(encoder),
Some(FetchState::Error(ref mut handler)) => handler.on_response_writable(encoder),
_ => Next::end(),
}
}
} }
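Taken together: the first request for a given id drives the download, and any later request parks a WaitingHandler on the same FetchControl, to be woken by set_status with a cloned terminal state (Done or Error). A sketch of the consumer side, assuming the fetcher keeps an Arc<FetchControl> per in-flight id:

use std::collections::HashMap;

fn handler_for(in_flight: &HashMap<String, Arc<FetchControl>>, id: &str, control: Control) -> Option<Box<server::Handler<HttpStream> + Send>> {
	// Returns a parked WaitingHandler; FetchControl::set_status() wakes it later.
	in_flight.get(id).map(|fetch| fetch.to_handler(control))
}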
pub struct ContentFetcherHandler<H: ContentValidator> { pub struct ContentFetcherHandler<H: ContentValidator> {
abort: Arc<AtomicBool>, fetch_control: Arc<FetchControl>,
control: Option<Control>, control: Option<Control>,
status: FetchState, status: FetchState,
client: Option<Client>, client: Option<Client>,
using_dapps_domains: bool, using_dapps_domains: bool,
dapp: H, installer: H,
} }
impl<H: ContentValidator> Drop for ContentFetcherHandler<H> { impl<H: ContentValidator> Drop for ContentFetcherHandler<H> {
fn drop(&mut self) { fn drop(&mut self) {
let manifest = match self.status { let result = match self.status {
FetchState::Done(ref manifest) => Some(manifest), FetchState::Done(_, ref result, _) => Some(result.clone()),
_ => None, _ => None,
}; };
self.dapp.done(manifest); self.installer.done(result);
} }
} }
impl<H: ContentValidator> ContentFetcherHandler<H> { impl<H: ContentValidator> ContentFetcherHandler<H> {
pub fn new( pub fn new(
app: GithubApp, url: String,
abort: Arc<AtomicBool>,
control: Control, control: Control,
using_dapps_domains: bool, using_dapps_domains: bool,
handler: H) -> Self { handler: H) -> (Self, Arc<FetchControl>) {
let fetch_control = Arc::new(FetchControl::default());
let client = Client::new(); let client = Client::new();
ContentFetcherHandler { let handler = ContentFetcherHandler {
abort: abort, fetch_control: fetch_control.clone(),
control: Some(control), control: Some(control),
client: Some(client), client: Some(client),
status: FetchState::NotStarted(app), status: FetchState::NotStarted(url),
using_dapps_domains: using_dapps_domains, using_dapps_domains: using_dapps_domains,
dapp: handler, installer: handler,
} };
(handler, fetch_control)
} }
fn close_client(client: &mut Option<Client>) { fn close_client(client: &mut Option<Client>) {
@@ -96,9 +178,8 @@ impl<H: ContentValidator> ContentFetcherHandler<H> {
.close(); .close();
} }
fn fetch_app(client: &mut Client, app: &GithubApp, abort: Arc<AtomicBool>, control: Control) -> Result<mpsc::Receiver<FetchResult>, String> { fn fetch_content(client: &mut Client, url: &str, abort: Arc<AtomicBool>, control: Control) -> Result<mpsc::Receiver<FetchResult>, String> {
client.request(app.url(), abort, Box::new(move || { client.request(url, abort, Box::new(move || {
trace!(target: "dapps", "Fetching finished."); trace!(target: "dapps", "Fetching finished.");
// Ignoring control errors // Ignoring control errors
let _ = control.ready(Next::read()); let _ = control.ready(Next::read());
@@ -108,19 +189,16 @@ impl<H: ContentValidator> ContentFetcherHandler<H> {
impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<H> { impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<H> {
fn on_request(&mut self, request: server::Request<HttpStream>) -> Next { fn on_request(&mut self, request: server::Request<HttpStream>) -> Next {
let status = if let FetchState::NotStarted(ref app) = self.status { let status = if let FetchState::NotStarted(ref url) = self.status {
Some(match *request.method() { Some(match *request.method() {
// Start fetching content // Start fetching content
Method::Get => { Method::Get => {
trace!(target: "dapps", "Fetching dapp: {:?}", app); trace!(target: "dapps", "Fetching content from: {:?}", url);
let control = self.control.take().expect("on_request is called only once, thus control is always Some"); let control = self.control.take().expect("on_request is called only once, thus control is always Some");
let client = self.client.as_mut().expect("on_request is called before client is closed."); let client = self.client.as_mut().expect("on_request is called before client is closed.");
let fetch = Self::fetch_app(client, app, self.abort.clone(), control); let fetch = Self::fetch_content(client, url, self.fetch_control.abort.clone(), control);
match fetch { match fetch {
Ok(receiver) => FetchState::InProgress { Ok(receiver) => FetchState::InProgress(receiver),
deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT),
receiver: receiver,
},
Err(e) => FetchState::Error(ContentHandler::error( Err(e) => FetchState::Error(ContentHandler::error(
StatusCode::BadGateway, StatusCode::BadGateway,
"Unable To Start Dapp Download", "Unable To Start Dapp Download",
@@ -140,6 +218,7 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
} else { None }; } else { None };
if let Some(status) = status { if let Some(status) = status {
self.fetch_control.set_status(&status);
self.status = status; self.status = status;
} }
@@ -149,49 +228,51 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
fn on_request_readable(&mut self, decoder: &mut Decoder<HttpStream>) -> Next { fn on_request_readable(&mut self, decoder: &mut Decoder<HttpStream>) -> Next {
let (status, next) = match self.status { let (status, next) = match self.status {
// Request may time out // Request may time out
FetchState::InProgress { ref deadline, .. } if *deadline < Instant::now() => { FetchState::InProgress(_) if self.fetch_control.deadline < Instant::now() => {
trace!(target: "dapps", "Fetching dapp failed because of timeout."); trace!(target: "dapps", "Fetching dapp failed because of timeout.");
let timeout = ContentHandler::error( let timeout = ContentHandler::error(
StatusCode::GatewayTimeout, StatusCode::GatewayTimeout,
"Download Timeout", "Download Timeout",
&format!("Could not fetch dapp bundle within {} seconds.", FETCH_TIMEOUT), &format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT),
None None
); );
Self::close_client(&mut self.client); Self::close_client(&mut self.client);
(Some(FetchState::Error(timeout)), Next::write()) (Some(FetchState::Error(timeout)), Next::write())
}, },
FetchState::InProgress { ref receiver, .. } => { FetchState::InProgress(ref receiver) => {
// Check if there is an answer // Check if there is an answer
let rec = receiver.try_recv(); let rec = receiver.try_recv();
match rec { match rec {
// Unpack and validate // Unpack and validate
Ok(Ok(path)) => { Ok(Ok(path)) => {
trace!(target: "dapps", "Fetching dapp finished. Starting validation."); trace!(target: "dapps", "Fetching content finished. Starting validation ({:?})", path);
Self::close_client(&mut self.client); Self::close_client(&mut self.client);
// Unpack and verify // Unpack and verify
let state = match self.dapp.validate_and_install(path.clone()) { let state = match self.installer.validate_and_install(path.clone()) {
Err(e) => { Err(e) => {
trace!(target: "dapps", "Error while validating dapp: {:?}", e); trace!(target: "dapps", "Error while validating content: {:?}", e);
FetchState::Error(ContentHandler::error( FetchState::Error(ContentHandler::error(
StatusCode::BadGateway, StatusCode::BadGateway,
"Invalid Dapp", "Invalid Dapp",
"Downloaded bundle does not contain a valid dapp.", "Downloaded bundle does not contain a valid content.",
Some(&format!("{:?}", e)) Some(&format!("{:?}", e))
)) ))
}, },
Ok(manifest) => FetchState::Done(manifest) Ok((id, result)) => {
let address = redirection_address(self.using_dapps_domains, &id);
FetchState::Done(id, result, Redirection::new(&address))
},
}; };
// Remove temporary zip file // Remove temporary zip file
// TODO [todr] Uncomment me let _ = fs::remove_file(path);
// let _ = fs::remove_file(path);
(Some(state), Next::write()) (Some(state), Next::write())
}, },
Ok(Err(e)) => { Ok(Err(e)) => {
warn!(target: "dapps", "Unable to fetch new dapp: {:?}", e); warn!(target: "dapps", "Unable to fetch content: {:?}", e);
let error = ContentHandler::error( let error = ContentHandler::error(
StatusCode::BadGateway, StatusCode::BadGateway,
"Download Error", "Download Error",
"There was an error when fetching the dapp.", "There was an error when fetching the content.",
Some(&format!("{:?}", e)), Some(&format!("{:?}", e)),
); );
(Some(FetchState::Error(error)), Next::write()) (Some(FetchState::Error(error)), Next::write())
@@ -205,6 +286,7 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
}; };
if let Some(status) = status { if let Some(status) = status {
self.fetch_control.set_status(&status);
self.status = status; self.status = status;
} }
@@ -213,12 +295,7 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
fn on_response(&mut self, res: &mut server::Response) -> Next { fn on_response(&mut self, res: &mut server::Response) -> Next {
match self.status { match self.status {
FetchState::Done(ref manifest) => { FetchState::Done(_, _, ref mut handler) => handler.on_response(res),
trace!(target: "dapps", "Fetching dapp finished. Redirecting to {}", manifest.id);
res.set_status(StatusCode::Found);
res.headers_mut().set(header::Location(redirection_address(self.using_dapps_domains, &manifest.id)));
Next::write()
},
FetchState::Error(ref mut handler) => handler.on_response(res), FetchState::Error(ref mut handler) => handler.on_response(res),
_ => Next::end(), _ => Next::end(),
} }
@@ -226,9 +303,9 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next { fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
match self.status { match self.status {
FetchState::Done(_, _, ref mut handler) => handler.on_response_writable(encoder),
FetchState::Error(ref mut handler) => handler.on_response_writable(encoder), FetchState::Error(ref mut handler) => handler.on_response_writable(encoder),
_ => Next::end(), _ => Next::end(),
} }
} }
} }


@@ -27,7 +27,7 @@ pub use self::auth::AuthRequiredHandler;
pub use self::echo::EchoHandler; pub use self::echo::EchoHandler;
pub use self::content::ContentHandler; pub use self::content::ContentHandler;
pub use self::redirect::Redirection; pub use self::redirect::Redirection;
pub use self::fetch::{ContentFetcherHandler, ContentValidator}; pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl};
use url::Url; use url::Url;
use hyper::{server, header, net, uri}; use hyper::{server, header, net, uri};


@@ -20,15 +20,20 @@ use hyper::{header, server, Decoder, Encoder, Next};
use hyper::net::HttpStream; use hyper::net::HttpStream;
use hyper::status::StatusCode; use hyper::status::StatusCode;
#[derive(Clone)]
pub struct Redirection { pub struct Redirection {
to_url: String to_url: String
} }
impl Redirection { impl Redirection {
pub fn new(url: &str) -> Box<Self> { pub fn new(url: &str) -> Self {
Box::new(Redirection { Redirection {
to_url: url.to_owned() to_url: url.to_owned()
}) }
}
pub fn boxed(url: &str) -> Box<Self> {
Box::new(Self::new(url))
} }
} }
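The new/boxed split exists because FetchState::Done now stores a Redirection by value (hence the Clone derive), while router call sites still want a boxed handler; roughly:

let state = FetchState::Done(id.clone(), endpoint.clone(), Redirection::new(&address)); // by value
let handler = Redirection::boxed("/home/"); // old boxed form for Box<Handler> call sites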


@@ -191,13 +191,16 @@ impl Server {
) -> Result<Server, ServerError> { ) -> Result<Server, ServerError> {
let panic_handler = Arc::new(Mutex::new(None)); let panic_handler = Arc::new(Mutex::new(None));
let authorization = Arc::new(authorization); let authorization = Arc::new(authorization);
let apps_fetcher = Arc::new(apps::fetcher::AppFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status)); let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status));
let endpoints = Arc::new(apps::all_endpoints(dapps_path)); let endpoints = Arc::new(apps::all_endpoints(dapps_path));
let special = Arc::new({ let special = Arc::new({
let mut special = HashMap::new(); let mut special = HashMap::new();
special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, panic_handler.clone())); special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, panic_handler.clone()));
special.insert(router::SpecialEndpoint::Api, api::RestApi::new(format!("{}", addr), endpoints.clone()));
special.insert(router::SpecialEndpoint::Utils, apps::utils()); special.insert(router::SpecialEndpoint::Utils, apps::utils());
special.insert(
router::SpecialEndpoint::Api,
api::RestApi::new(format!("{}", addr), endpoints.clone(), content_fetcher.clone())
);
special special
}); });
let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); let hosts = Self::allowed_hosts(hosts, format!("{}", addr));
@@ -206,7 +209,7 @@ impl Server {
.handle(move |ctrl| router::Router::new( .handle(move |ctrl| router::Router::new(
ctrl, ctrl,
apps::main_page(), apps::main_page(),
apps_fetcher.clone(), content_fetcher.clone(),
endpoints.clone(), endpoints.clone(),
special.clone(), special.clone(),
authorization.clone(), authorization.clone(),


@@ -17,20 +17,31 @@
use mime_guess; use mime_guess;
use std::io::{Seek, Read, SeekFrom}; use std::io::{Seek, Read, SeekFrom};
use std::fs; use std::fs;
use std::path::PathBuf; use std::path::{Path, PathBuf};
use page::handler; use page::handler;
use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler};
#[derive(Debug, Clone)]
pub struct LocalPageEndpoint { pub struct LocalPageEndpoint {
path: PathBuf, path: PathBuf,
info: EndpointInfo, mime: Option<String>,
info: Option<EndpointInfo>,
} }
impl LocalPageEndpoint { impl LocalPageEndpoint {
pub fn new(path: PathBuf, info: EndpointInfo) -> Self { pub fn new(path: PathBuf, info: EndpointInfo) -> Self {
LocalPageEndpoint { LocalPageEndpoint {
path: path, path: path,
info: info, mime: None,
info: Some(info),
}
}
pub fn single_file(path: PathBuf, mime: String) -> Self {
LocalPageEndpoint {
path: path,
mime: Some(mime),
info: None,
} }
} }
@@ -41,12 +52,21 @@ impl LocalPageEndpoint {
impl Endpoint for LocalPageEndpoint { impl Endpoint for LocalPageEndpoint {
fn info(&self) -> Option<&EndpointInfo> { fn info(&self) -> Option<&EndpointInfo> {
Some(&self.info) self.info.as_ref()
} }
fn to_handler(&self, path: EndpointPath) -> Box<Handler> { fn to_handler(&self, path: EndpointPath) -> Box<Handler> {
if let Some(ref mime) = self.mime {
Box::new(handler::PageHandler { Box::new(handler::PageHandler {
app: LocalDapp::new(self.path.clone()), app: LocalSingleFile { path: self.path.clone(), mime: mime.clone() },
prefix: None,
path: path,
file: Default::default(),
safe_to_embed: false,
})
} else {
Box::new(handler::PageHandler {
app: LocalDapp { path: self.path.clone() },
prefix: None, prefix: None,
path: path, path: path,
file: Default::default(), file: Default::default(),
@@ -54,19 +74,25 @@ impl Endpoint for LocalPageEndpoint {
}) })
} }
} }
}
struct LocalSingleFile {
path: PathBuf,
mime: String,
}
impl handler::Dapp for LocalSingleFile {
type DappFile = LocalFile;
fn file(&self, _path: &str) -> Option<Self::DappFile> {
LocalFile::from_path(&self.path, Some(&self.mime))
}
}
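A quick sketch of the new single-file mode, assuming a previously fetched image on disk: info() is None for such endpoints, and every request path serves the same file with the stored MIME type.

let endpoint = LocalPageEndpoint::single_file(PathBuf::from("/tmp/icon.png"), "image/png".into());
assert!(endpoint.info().is_none());
let handler = endpoint.to_handler(EndpointPath::default());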
struct LocalDapp { struct LocalDapp {
path: PathBuf, path: PathBuf,
} }
impl LocalDapp {
fn new(path: PathBuf) -> Self {
LocalDapp {
path: path
}
}
}
impl handler::Dapp for LocalDapp { impl handler::Dapp for LocalDapp {
type DappFile = LocalFile; type DappFile = LocalFile;
@@ -75,18 +101,7 @@ impl handler::Dapp for LocalDapp {
for part in file_path.split('/') { for part in file_path.split('/') {
path.push(part); path.push(part);
} }
// Check if file exists LocalFile::from_path(&path, None)
fs::File::open(path.clone()).ok().map(|file| {
let content_type = mime_guess::guess_mime_type(path);
let len = file.metadata().ok().map_or(0, |meta| meta.len());
LocalFile {
content_type: content_type.to_string(),
buffer: [0; 4096],
file: file,
pos: 0,
len: len,
}
})
} }
} }
@@ -98,6 +113,24 @@ struct LocalFile {
pos: u64, pos: u64,
} }
impl LocalFile {
fn from_path<P: AsRef<Path>>(path: P, mime: Option<&str>) -> Option<Self> {
// Check if file exists
fs::File::open(&path).ok().map(|file| {
let content_type = mime.map(|mime| mime.to_owned())
.unwrap_or_else(|| mime_guess::guess_mime_type(path).to_string());
let len = file.metadata().ok().map_or(0, |meta| meta.len());
LocalFile {
content_type: content_type,
buffer: [0; 4096],
file: file,
pos: 0,
len: len,
}
})
}
}
impl handler::DappFile for LocalFile { impl handler::DappFile for LocalFile {
fn content_type(&self) -> &str { fn content_type(&self) -> &str {
&self.content_type &self.content_type


@@ -27,7 +27,7 @@ use url::{Url, Host};
use hyper::{self, server, Next, Encoder, Decoder, Control, StatusCode}; use hyper::{self, server, Next, Encoder, Decoder, Control, StatusCode};
use hyper::net::HttpStream; use hyper::net::HttpStream;
use apps; use apps;
use apps::fetcher::AppFetcher; use apps::fetcher::ContentFetcher;
use endpoint::{Endpoint, Endpoints, EndpointPath}; use endpoint::{Endpoint, Endpoints, EndpointPath};
use handlers::{Redirection, extract_url, ContentHandler}; use handlers::{Redirection, extract_url, ContentHandler};
use self::auth::{Authorization, Authorized}; use self::auth::{Authorization, Authorized};
@@ -45,7 +45,7 @@ pub struct Router<A: Authorization + 'static> {
control: Option<Control>, control: Option<Control>,
main_page: &'static str, main_page: &'static str,
endpoints: Arc<Endpoints>, endpoints: Arc<Endpoints>,
fetch: Arc<AppFetcher>, fetch: Arc<ContentFetcher>,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>, special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>,
authorization: Arc<A>, authorization: Arc<A>,
allowed_hosts: Option<Vec<String>>, allowed_hosts: Option<Vec<String>>,
@@ -91,7 +91,7 @@ impl<A: Authorization + 'static> server::Handler<HttpStream> for Router<A> {
(Some(ref path), _) if self.fetch.contains(&path.app_id) => { (Some(ref path), _) if self.fetch.contains(&path.app_id) => {
self.fetch.to_async_handler(path.clone(), control) self.fetch.to_async_handler(path.clone(), control)
}, },
// Redirection to main page (maybe 404 instead?) // 404 for non-existent content
(Some(ref path), _) if *req.method() == hyper::method::Method::Get => { (Some(ref path), _) if *req.method() == hyper::method::Method::Get => {
let address = apps::redirection_address(path.using_dapps_domains, self.main_page); let address = apps::redirection_address(path.using_dapps_domains, self.main_page);
Box::new(ContentHandler::error( Box::new(ContentHandler::error(
@@ -104,7 +104,7 @@ impl<A: Authorization + 'static> server::Handler<HttpStream> for Router<A> {
// Redirect any GET request to home. // Redirect any GET request to home.
_ if *req.method() == hyper::method::Method::Get => { _ if *req.method() == hyper::method::Method::Get => {
let address = apps::redirection_address(false, self.main_page); let address = apps::redirection_address(false, self.main_page);
Redirection::new(address.as_str()) Redirection::boxed(address.as_str())
}, },
// RPC by default // RPC by default
_ => { _ => {
@@ -136,19 +136,19 @@ impl<A: Authorization> Router<A> {
pub fn new( pub fn new(
control: Control, control: Control,
main_page: &'static str, main_page: &'static str,
app_fetcher: Arc<AppFetcher>, content_fetcher: Arc<ContentFetcher>,
endpoints: Arc<Endpoints>, endpoints: Arc<Endpoints>,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>, special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>,
authorization: Arc<A>, authorization: Arc<A>,
allowed_hosts: Option<Vec<String>>, allowed_hosts: Option<Vec<String>>,
) -> Self { ) -> Self {
let handler = special.get(&SpecialEndpoint::Api).unwrap().to_handler(EndpointPath::default()); let handler = special.get(&SpecialEndpoint::Utils).unwrap().to_handler(EndpointPath::default());
Router { Router {
control: Some(control), control: Some(control),
main_page: main_page, main_page: main_page,
endpoints: endpoints, endpoints: endpoints,
fetch: app_fetcher, fetch: content_fetcher,
special: special, special: special,
authorization: authorization, authorization: authorization,
allowed_hosts: allowed_hosts, allowed_hosts: allowed_hosts,


@@ -38,10 +38,6 @@ struct RpcEndpoint {
} }
impl Endpoint for RpcEndpoint { impl Endpoint for RpcEndpoint {
fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
panic!("RPC Endpoint is asynchronous and requires Control object.");
}
fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box<Handler> { fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box<Handler> {
let panic_handler = PanicHandler { handler: self.panic_handler.clone() }; let panic_handler = PanicHandler { handler: self.panic_handler.clone() };
Box::new(ServerHandler::new( Box::new(ServerHandler::new(


@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use tests::helpers::{serve, request}; use tests::helpers::{serve, serve_with_registrar, request};
#[test] #[test]
fn should_return_error() { fn should_return_error() {
@@ -82,3 +82,24 @@ fn should_handle_ping() {
assert_eq!(response.body, "0\n\n".to_owned()); assert_eq!(response.body, "0\n\n".to_owned());
} }
#[test]
fn should_try_to_resolve_dapp() {
// given
let (server, registrar) = serve_with_registrar();
// when
let response = request(server,
"\
GET /api/content/1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d HTTP/1.1\r\n\
Host: home.parity\r\n\
Connection: close\r\n\
\r\n\
"
);
// then
assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned());
assert_eq!(registrar.calls.lock().len(), 2);
}


@@ -17,7 +17,7 @@
use std::env; use std::env;
use std::str; use std::str;
use std::sync::Arc; use std::sync::Arc;
use rustc_serialize::hex::{ToHex, FromHex}; use rustc_serialize::hex::FromHex;
use ServerBuilder; use ServerBuilder;
use Server; use Server;


@@ -115,7 +115,7 @@ fn should_serve_rpc() {
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#));
} }
#[test] #[test]
@@ -137,7 +137,7 @@ fn should_serve_rpc_at_slash_rpc() {
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#));
} }


@@ -11,7 +11,7 @@ build = "build.rs"
ethcore-ipc-codegen = { path = "../ipc/codegen" } ethcore-ipc-codegen = { path = "../ipc/codegen" }
[dependencies] [dependencies]
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }
ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc = { path = "../ipc/rpc" }
rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" } rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" }


@@ -460,7 +460,7 @@ mod client_tests {
crossbeam::scope(move |scope| { crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false)); let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url); run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap(); let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap(); client.open_default(path.as_str().to_owned()).unwrap();
client.put("xxx".as_bytes(), "1".as_bytes()).unwrap(); client.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
client.close().unwrap(); client.close().unwrap();
@@ -477,7 +477,7 @@ mod client_tests {
crossbeam::scope(move |scope| { crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false)); let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url); run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap(); let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap(); client.open_default(path.as_str().to_owned()).unwrap();
client.put("xxx".as_bytes(), "1".as_bytes()).unwrap(); client.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
@@ -498,7 +498,7 @@ mod client_tests {
crossbeam::scope(move |scope| { crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false)); let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url); run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap(); let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap(); client.open_default(path.as_str().to_owned()).unwrap();
assert!(client.get("xxx".as_bytes()).unwrap().is_none()); assert!(client.get("xxx".as_bytes()).unwrap().is_none());
@@ -516,7 +516,7 @@ mod client_tests {
crossbeam::scope(move |scope| { crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false)); let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url); run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap(); let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap(); client.open_default(path.as_str().to_owned()).unwrap();
let transaction = DBTransaction::new(); let transaction = DBTransaction::new();
@@ -541,7 +541,7 @@ mod client_tests {
let stop = StopGuard::new(); let stop = StopGuard::new();
run_worker(&scope, stop.share(), url); run_worker(&scope, stop.share(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap(); let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap(); client.open_default(path.as_str().to_owned()).unwrap();
let mut batch = Vec::new(); let mut batch = Vec::new();


@@ -66,13 +66,13 @@ pub fn extras_service_url(db_path: &str) -> Result<String, ::std::io::Error> {
pub fn blocks_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> { pub fn blocks_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> {
let url = try!(blocks_service_url(db_path)); let url = try!(blocks_service_url(db_path));
let client = try!(nanoipc::init_client::<DatabaseClient<_>>(&url)); let client = try!(nanoipc::generic_client::<DatabaseClient<_>>(&url));
Ok(client) Ok(client)
} }
pub fn extras_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> { pub fn extras_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> {
let url = try!(extras_service_url(db_path)); let url = try!(extras_service_url(db_path));
let client = try!(nanoipc::init_client::<DatabaseClient<_>>(&url)); let client = try!(nanoipc::generic_client::<DatabaseClient<_>>(&url));
Ok(client) Ok(client)
} }


@@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::time::Duration;
use std::io::{Read, Write}; use std::io::{Read, Write};
use std::str::{self, Lines}; use std::str::{self, Lines};
use std::net::{TcpStream, SocketAddr}; use std::net::{TcpStream, SocketAddr};
@@ -43,10 +44,11 @@ pub fn read_block(lines: &mut Lines, all: bool) -> String {
pub fn request(address: &SocketAddr, request: &str) -> Response { pub fn request(address: &SocketAddr, request: &str) -> Response {
let mut req = TcpStream::connect(address).unwrap(); let mut req = TcpStream::connect(address).unwrap();
req.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
req.write_all(request.as_bytes()).unwrap(); req.write_all(request.as_bytes()).unwrap();
let mut response = String::new(); let mut response = String::new();
req.read_to_string(&mut response).unwrap(); let _ = req.read_to_string(&mut response);
let mut lines = response.lines(); let mut lines = response.lines();
let status = lines.next().unwrap().to_owned(); let status = lines.next().unwrap().to_owned();


@@ -23,7 +23,8 @@ use std::ops::{Deref, DerefMut};
use rand::random; use rand::random;
pub struct RandomTempPath { pub struct RandomTempPath {
path: PathBuf path: PathBuf,
pub panic_on_drop_failure: bool,
} }
pub fn random_filename() -> String { pub fn random_filename() -> String {
@@ -39,7 +40,8 @@ impl RandomTempPath {
let mut dir = env::temp_dir(); let mut dir = env::temp_dir();
dir.push(random_filename()); dir.push(random_filename());
RandomTempPath { RandomTempPath {
path: dir.clone() path: dir.clone(),
panic_on_drop_failure: true,
} }
} }
@@ -48,7 +50,8 @@ impl RandomTempPath {
dir.push(random_filename()); dir.push(random_filename());
fs::create_dir_all(dir.as_path()).unwrap(); fs::create_dir_all(dir.as_path()).unwrap();
RandomTempPath { RandomTempPath {
path: dir.clone() path: dir.clone(),
panic_on_drop_failure: true,
} }
} }
@@ -72,16 +75,24 @@ impl AsRef<Path> for RandomTempPath {
self.as_path() self.as_path()
} }
} }
impl Deref for RandomTempPath {
type Target = Path;
fn deref(&self) -> &Self::Target {
self.as_path()
}
}
impl Drop for RandomTempPath { impl Drop for RandomTempPath {
fn drop(&mut self) { fn drop(&mut self) {
if let Err(_) = fs::remove_dir_all(&self) { if let Err(_) = fs::remove_dir_all(&self) {
if let Err(e) = fs::remove_file(&self) { if let Err(e) = fs::remove_file(&self) {
if self.panic_on_drop_failure {
panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e); panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e);
} }
} }
} }
} }
}
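Tests that clean up (or deliberately keep) their temporary directory themselves can now opt out of the drop-time panic; a minimal sketch:

let mut path = RandomTempPath::new();
path.panic_on_drop_failure = false; // best-effort cleanup: removal failures on drop are ignored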
pub struct GuardedTempResult<T> { pub struct GuardedTempResult<T> {
pub result: Option<T>, pub result: Option<T>,


@@ -23,15 +23,9 @@ RUN rustup target add aarch64-unknown-linux-gnu
# show backtraces # show backtraces
ENV RUST_BACKTRACE 1 ENV RUST_BACKTRACE 1
# set compilers
ENV CXX aarch64-linux-gnu-g++
ENV CC aarch64-linux-gnu-gcc
# show tools # show tools
RUN rustc -vV && \ RUN rustc -vV && \
cargo -V && \ cargo -V
gcc -v &&\
g++ -v
# build parity # build parity
RUN git clone https://github.com/ethcore/parity && \ RUN git clone https://github.com/ethcore/parity && \


@@ -23,15 +23,10 @@ RUN rustup target add armv7-unknown-linux-gnueabihf
# show backtraces # show backtraces
ENV RUST_BACKTRACE 1 ENV RUST_BACKTRACE 1
# set compilers
ENV CXX arm-linux-gnueabihf-g++
ENV CC arm-linux-gnueabihf-gcc
# show tools # show tools
RUN rustc -vV && \ RUN rustc -vV && \
cargo -V && \ cargo -V
gcc -v &&\
g++ -v
# build parity # build parity
RUN git clone https://github.com/ethcore/parity && \ RUN git clone https://github.com/ethcore/parity && \


@@ -91,7 +91,7 @@ pub struct Light {
seed_compute: Mutex<SeedHashCompute>, seed_compute: Mutex<SeedHashCompute>,
} }
/// Light cache structur /// Light cache structure
impl Light { impl Light {
/// Create a new light cache for a given block number /// Create a new light cache for a given block number
pub fn new(block_number: u64) -> Light { pub fn new(block_number: u64) -> Light {
@@ -134,16 +134,27 @@ impl Light {
}) })
} }
pub fn to_file(&self) -> io::Result<()> { pub fn to_file(&self) -> io::Result<PathBuf> {
let seed_compute = self.seed_compute.lock(); let seed_compute = self.seed_compute.lock();
let path = Light::file_path(seed_compute.get_seedhash(self.block_number)); let path = Light::file_path(seed_compute.get_seedhash(self.block_number));
if self.block_number >= ETHASH_EPOCH_LENGTH * 2 {
let deprecated = Light::file_path(
seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2));
if deprecated.exists() {
debug!(target: "ethash", "removing: {:?}", &deprecated);
try!(fs::remove_file(deprecated));
}
}
try!(fs::create_dir_all(path.parent().unwrap())); try!(fs::create_dir_all(path.parent().unwrap()));
let mut file = try!(File::create(path)); let mut file = try!(File::create(&path));
let cache_size = self.cache.len() * NODE_BYTES; let cache_size = self.cache.len() * NODE_BYTES;
let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) }; let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) };
try!(file.write(buf)); try!(file.write(buf));
Ok(()) Ok(path)
} }
} }
@@ -455,3 +466,18 @@ fn test_seed_compute_after_newer() {
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162]; let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash); assert_eq!(seed_compute.get_seedhash(486382), hash);
} }
#[test]
fn test_drop_old_data() {
let first = Light::new(0).to_file().unwrap();
let second = Light::new(ETHASH_EPOCH_LENGTH).to_file().unwrap();
assert!(fs::metadata(&first).is_ok());
let _ = Light::new(ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = Light::new(ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err());
}


@@ -20,12 +20,12 @@ num_cpus = "0.2"
crossbeam = "0.2.9" crossbeam = "0.2.9"
lazy_static = "0.2" lazy_static = "0.2"
bloomchain = "0.1" bloomchain = "0.1"
rayon = "0.3.1" rayon = "0.4.2"
semver = "0.2" semver = "0.2"
bit-set = "0.4" bit-set = "0.4"
time = "0.1" time = "0.1"
evmjit = { path = "../evmjit", optional = true } evmjit = { path = "../evmjit", optional = true }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
ethash = { path = "../ethash" } ethash = { path = "../ethash" }
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore-io = { path = "../util/io" } ethcore-io = { path = "../util/io" }


@@ -322,6 +322,26 @@ impl AccountProvider {
Ok(signature) Ok(signature)
} }
/// Decrypts a message. Account must be unlocked.
pub fn decrypt(&self, account: Address, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let data = {
let mut unlocked = self.unlocked.lock();
let data = try!(unlocked.get(&account).ok_or(Error::NotUnlocked)).clone();
if let Unlock::Temp = data.unlock {
unlocked.remove(&account).expect("data exists: so key must exist: qed");
}
if let Unlock::Timed((ref start, ref duration)) = data.unlock {
if start.elapsed() > Duration::from_millis(*duration as u64) {
unlocked.remove(&account).expect("data exists: so key must exist: qed");
return Err(Error::NotUnlocked);
}
}
data
};
Ok(try!(self.sstore.decrypt(&account, &data.password, shared_mac, message)))
}
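Usage mirrors sign(): the account must be unlocked first, and a Temp unlock is consumed by the call. A hedged sketch, where account, password and ciphertext are placeholders and unlock_account_temporarily is the pre-existing unlock API:

try!(provider.unlock_account_temporarily(account, "password".into()));
let plain = try!(provider.decrypt(account, &[], &ciphertext)); // empty shared MAC as a placeholder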
/// Unlocks an account, signs the message, and locks it again. /// Unlocks an account, signs the message, and locks it again.
pub fn sign_with_password(&self, account: Address, password: String, message: Message) -> Result<Signature, Error> { pub fn sign_with_password(&self, account: Address, password: String, message: Message) -> Result<Signature, Error> {
let signature = try!(self.sstore.sign(&account, &password, &message)); let signature = try!(self.sstore.sign(&account, &password, &message));


@@ -205,7 +205,6 @@ pub struct ClosedBlock {
block: ExecutedBlock, block: ExecutedBlock,
uncle_bytes: Bytes, uncle_bytes: Bytes,
last_hashes: Arc<LastHashes>, last_hashes: Arc<LastHashes>,
unclosed_state: State,
} }
/// Just like `ClosedBlock` except that we can't reopen it and it's faster. /// Just like `ClosedBlock` except that we can't reopen it and it's faster.
@@ -343,18 +342,19 @@ impl<'x> OpenBlock<'x> {
} }
} }
/// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles. /// Turn this into a `ClosedBlock`.
pub fn close(self) -> ClosedBlock { pub fn close(self) -> ClosedBlock {
let mut s = self; let mut s = self;
let unclosed_state = s.block.state.clone(); // take a snapshot so the engine's changes can be rolled back.
s.block.state.snapshot();
s.engine.on_close_block(&mut s.block); s.engine.on_close_block(&mut s.block);
s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec())));
let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); s.block.base.header.set_uncles_hash(uncle_bytes.sha3());
s.block.base.header.set_state_root(s.block.state.root().clone()); s.block.base.header.set_state_root(s.block.state.root().clone());
s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec())));
s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
@@ -362,33 +362,37 @@ impl<'x> OpenBlock<'x> {
block: s.block, block: s.block,
uncle_bytes: uncle_bytes, uncle_bytes: uncle_bytes,
last_hashes: s.last_hashes, last_hashes: s.last_hashes,
unclosed_state: unclosed_state,
} }
} }
/// Turn this into a `LockedBlock`. A BlockChain must be provided in order to figure out the uncles. /// Turn this into a `LockedBlock`.
pub fn close_and_lock(self) -> LockedBlock { pub fn close_and_lock(self) -> LockedBlock {
let mut s = self; let mut s = self;
// take a snapshot so the engine's changes can be rolled back.
s.block.state.snapshot();
s.engine.on_close_block(&mut s.block); s.engine.on_close_block(&mut s.block);
if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP { if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP {
s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec())));
} }
let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
if s.block.base.header.uncles_hash().is_zero() { if s.block.base.header.uncles_hash().is_zero() {
s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); s.block.base.header.set_uncles_hash(uncle_bytes.sha3());
} }
if s.block.base.header.receipts_root().is_zero() || s.block.base.header.receipts_root() == &SHA3_NULL_RLP { if s.block.base.header.receipts_root().is_zero() || s.block.base.header.receipts_root() == &SHA3_NULL_RLP {
s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec())));
} }
s.block.base.header.set_state_root(s.block.state.root().clone()); s.block.base.header.set_state_root(s.block.state.root().clone());
s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
LockedBlock { ClosedBlock {
block: s.block, block: s.block,
uncle_bytes: uncle_bytes, uncle_bytes: uncle_bytes,
} last_hashes: s.last_hashes,
}.lock()
} }
} }
@ -409,7 +413,17 @@ impl ClosedBlock {
pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) }
/// Turn this into a `LockedBlock`, unable to be reopened again. /// Turn this into a `LockedBlock`, unable to be reopened again.
pub fn lock(self) -> LockedBlock { pub fn lock(mut self) -> LockedBlock {
// finalize the changes made by the engine.
self.block.state.clear_snapshot();
if let Err(e) = self.block.state.commit() {
warn!("Error committing closed block's state: {:?}", e);
}
// set the state root here, after commit recalculates with the block
// rewards.
self.block.base.header.set_state_root(self.block.state.root().clone());
LockedBlock { LockedBlock {
block: self.block, block: self.block,
uncle_bytes: self.uncle_bytes, uncle_bytes: self.uncle_bytes,
@ -417,12 +431,12 @@ impl ClosedBlock {
} }
/// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
pub fn reopen<'a>(self, engine: &'a Engine) -> OpenBlock<'a> { pub fn reopen(mut self, engine: &Engine) -> OpenBlock {
// revert rewards (i.e. set state back at last transaction's state). // revert rewards (i.e. set state back at last transaction's state).
let mut block = self.block; self.block.state.revert_snapshot();
block.state = self.unclosed_state;
OpenBlock { OpenBlock {
block: block, block: self.block,
engine: engine, engine: engine,
last_hashes: self.last_hashes, last_hashes: self.last_hashes,
} }
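The change above swaps the old strategy of stashing a full clone of the pre-close `State` in `unclosed_state` for a snapshot/revert scheme: `close` and `close_and_lock` snapshot before the engine's `on_close_block` runs, `lock` clears the snapshot and commits, and `reopen` reverts it. A minimal, self-contained sketch of those checkpoint semantics, with toy types standing in for Parity's `State` API:

```rust
// Toy model of the snapshot lifecycle used by close()/lock()/reopen().
// `CheckpointedState` is illustrative only; it is not Parity's State type.
struct CheckpointedState {
    current: Vec<(String, u64)>,          // live account balances (toy data)
    snapshot: Option<Vec<(String, u64)>>, // at most one outstanding snapshot
}

impl CheckpointedState {
    /// Remember the current state so later writes can be undone.
    fn snapshot(&mut self) {
        self.snapshot = Some(self.current.clone());
    }
    /// Keep the engine's changes; just drop the undo information.
    fn clear_snapshot(&mut self) {
        self.snapshot = None;
    }
    /// Roll back to the snapshot, as `ClosedBlock::reopen` does.
    fn revert_snapshot(&mut self) {
        if let Some(old) = self.snapshot.take() {
            self.current = old;
        }
    }
}

fn main() {
    let mut state = CheckpointedState {
        current: vec![("author".into(), 0)],
        snapshot: None,
    };
    state.snapshot();                  // OpenBlock::close
    state.current[0].1 += 5;           // engine applies the block reward
    state.revert_snapshot();           // ClosedBlock::reopen
    assert_eq!(state.current[0].1, 0); // reward rolled back without a deep copy
}
```

The happy path (`lock`) only discards the undo log, so the common case no longer pays for a full `State` clone held inside every `ClosedBlock`.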
@@ -23,6 +23,7 @@ use header::*;
 use super::extras::*;
 use transaction::*;
 use views::*;
+use log_entry::{LogEntry, LocalizedLogEntry};
 use receipt::Receipt;
 use blooms::{Bloom, BloomGroup};
 use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
@@ -127,6 +128,10 @@ pub trait BlockProvider {
 	/// Returns numbers of blocks containing given bloom.
 	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
+
+	/// Returns logs matching given filter.
+	fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
+		where F: Fn(&LogEntry) -> bool, Self: Sized;
 }
 
 #[derive(Debug, Hash, Eq, PartialEq, Clone)]
@@ -315,6 +320,51 @@ impl BlockProvider for BlockChain {
 			.map(|b| b as BlockNumber)
 			.collect()
 	}
+
+	fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
+		where F: Fn(&LogEntry) -> bool, Self: Sized {
+		// sort in reverse order
+		blocks.sort_by(|a, b| b.cmp(a));
+
+		let mut log_index = 0;
+		let mut logs = blocks.into_iter()
+			.filter_map(|number| self.block_hash(number).map(|hash| (number, hash)))
+			.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
+			.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
+			.flat_map(|(number, hash, mut receipts, hashes)| {
+				assert_eq!(receipts.len(), hashes.len());
+				log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());
+
+				let receipts_len = receipts.len();
+				receipts.reverse();
+				receipts.into_iter()
+					.map(|receipt| receipt.logs)
+					.zip(hashes)
+					.enumerate()
+					.flat_map(move |(index, (mut logs, tx_hash))| {
+						let current_log_index = log_index;
+						log_index -= logs.len();
+
+						logs.reverse();
+						logs.into_iter()
+							.enumerate()
+							.map(move |(i, log)| LocalizedLogEntry {
+								entry: log,
+								block_hash: hash,
+								block_number: number,
+								transaction_hash: tx_hash,
+								// iterating in reverse order
+								transaction_index: receipts_len - index - 1,
+								log_index: current_log_index - i - 1,
+							})
+					})
+			})
+			.filter(|log_entry| matches(&log_entry.entry))
+			.take(limit.unwrap_or(::std::usize::MAX))
+			.collect::<Vec<LocalizedLogEntry>>();
+
+		logs.reverse();
+		logs
+	}
 }
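The new `logs` walks matching blocks newest-first and reconstructs each entry's `transaction_index` and block-wide `log_index` by counting down from per-receipt totals. The index bookkeeping in isolation, as a runnable sketch over plain integers rather than real receipts:

```rust
// Given per-transaction log counts for one block, visit receipts (and
// their logs) in reverse and recover each log's forward-order indices,
// mirroring the arithmetic in BlockChain::logs above.
fn main() {
    let logs_per_receipt = [2usize, 1]; // tx 0 emitted 2 logs, tx 1 emitted 1
    let receipts_len = logs_per_receipt.len();
    let mut log_index: usize = logs_per_receipt.iter().sum(); // 3 logs in the block

    for (index, &count) in logs_per_receipt.iter().rev().enumerate() {
        let current_log_index = log_index;
        log_index -= count;
        for i in 0..count {
            let transaction_index = receipts_len - index - 1;
            let block_log_index = current_log_index - i - 1;
            println!("tx {} -> log_index {}", transaction_index, block_log_index);
        }
    }
    // Prints tx 1 -> 2, then tx 0 -> 1 and tx 0 -> 0: exactly the forward
    // numbering 0, 1, 2 once the final `logs.reverse()` restores order.
}
```

Iterating in reverse lets `take(limit)` keep the most recent entries cheaply; the single `reverse()` at the end restores ascending order for callers.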
 
 pub struct AncestryIter<'a> {
@@ -1160,6 +1210,7 @@ mod tests {
 	use blockchain::extras::TransactionAddress;
 	use views::BlockView;
 	use transaction::{Transaction, Action};
+	use log_entry::{LogEntry, LocalizedLogEntry};
 
 	fn new_db(path: &str) -> Arc<Database> {
 		Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap())
@@ -1566,7 +1617,7 @@ mod tests {
 		let mut block_header = bc.block_header(&best_hash);
 
 		while !block_header.is_none() {
-			block_header = bc.block_header(&block_header.unwrap().parent_hash());
+			block_header = bc.block_header(block_header.unwrap().parent_hash());
 		}
 
 		assert!(bc.cache_size().blocks > 1024 * 1024);
@@ -1619,6 +1670,127 @@ mod tests {
 		res
 	}
 
+	#[test]
+	fn test_logs() {
+		// given
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		let genesis = canon_chain.generate(&mut finalizer).unwrap();
+		// just insert dummy transaction so that #transactions=#receipts
+		let t1 = Transaction {
+			nonce: 0.into(),
+			gas_price: 0.into(),
+			gas: 100_000.into(),
+			action: Action::Create,
+			value: 100.into(),
+			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
+		}.sign(&"".sha3());
+		let t2 = Transaction {
+			nonce: 0.into(),
+			gas_price: 0.into(),
+			gas: 100_000.into(),
+			action: Action::Create,
+			value: 100.into(),
+			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
+		}.sign(&"".sha3());
+		let t3 = Transaction {
+			nonce: 0.into(),
+			gas_price: 0.into(),
+			gas: 100_000.into(),
+			action: Action::Create,
+			value: 100.into(),
+			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
+		}.sign(&"".sha3());
+		let tx_hash1 = t1.hash();
+		let tx_hash2 = t2.hash();
+		let tx_hash3 = t3.hash();
+		let b1 = canon_chain.with_transaction(t1).with_transaction(t2).generate(&mut finalizer).unwrap();
+		let b2 = canon_chain.with_transaction(t3).generate(&mut finalizer).unwrap();
+
+		let temp = RandomTempPath::new();
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+		insert_block(&db, &bc, &b1, vec![Receipt {
+			state_root: H256::default(),
+			gas_used: 10_000.into(),
+			log_bloom: Default::default(),
+			logs: vec![
+				LogEntry { address: Default::default(), topics: vec![], data: vec![1], },
+				LogEntry { address: Default::default(), topics: vec![], data: vec![2], },
+			],
+		},
+		Receipt {
+			state_root: H256::default(),
+			gas_used: 10_000.into(),
+			log_bloom: Default::default(),
+			logs: vec![
+				LogEntry { address: Default::default(), topics: vec![], data: vec![3], },
+			],
+		}]);
+		insert_block(&db, &bc, &b2, vec![
+			Receipt {
+				state_root: H256::default(),
+				gas_used: 10_000.into(),
+				log_bloom: Default::default(),
+				logs: vec![
+					LogEntry { address: Default::default(), topics: vec![], data: vec![4], },
+				],
+			}
+		]);
+
+		// when
+		let block1 = BlockView::new(&b1);
+		let block2 = BlockView::new(&b2);
+		let logs1 = bc.logs(vec![1, 2], |_| true, None);
+		let logs2 = bc.logs(vec![1, 2], |_| true, Some(1));
+
+		// then
+		assert_eq!(logs1, vec![
+			LocalizedLogEntry {
+				entry: LogEntry { address: Default::default(), topics: vec![], data: vec![1] },
+				block_hash: block1.hash(),
+				block_number: block1.header().number(),
+				transaction_hash: tx_hash1.clone(),
+				transaction_index: 0,
+				log_index: 0,
+			},
+			LocalizedLogEntry {
+				entry: LogEntry { address: Default::default(), topics: vec![], data: vec![2] },
+				block_hash: block1.hash(),
+				block_number: block1.header().number(),
+				transaction_hash: tx_hash1.clone(),
+				transaction_index: 0,
+				log_index: 1,
+			},
+			LocalizedLogEntry {
+				entry: LogEntry { address: Default::default(), topics: vec![], data: vec![3] },
+				block_hash: block1.hash(),
+				block_number: block1.header().number(),
+				transaction_hash: tx_hash2.clone(),
+				transaction_index: 1,
+				log_index: 2,
+			},
+			LocalizedLogEntry {
+				entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] },
+				block_hash: block2.hash(),
+				block_number: block2.header().number(),
+				transaction_hash: tx_hash3.clone(),
+				transaction_index: 0,
+				log_index: 0,
+			}
+		]);
+		assert_eq!(logs2, vec![
+			LocalizedLogEntry {
+				entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] },
+				block_hash: block2.hash(),
+				block_number: block2.header().number(),
+				transaction_hash: tx_hash3.clone(),
+				transaction_index: 0,
+				log_index: 0,
+			}
+		]);
+	}
+
 	#[test]
 	fn test_bloom_filter_simple() {
 		// TODO: From here
@@ -17,14 +17,15 @@
 use crypto::sha2::Sha256 as Sha256Digest;
 use crypto::ripemd160::Ripemd160 as Ripemd160Digest;
 use crypto::digest::Digest;
-use util::*;
+use std::cmp::min;
+use util::{U256, H256, Hashable, FixedHash, BytesRef};
 use ethkey::{Signature, recover as ec_recover};
 use ethjson;
 
 /// Native implementation of a built-in contract.
 pub trait Impl: Send + Sync {
 	/// execute this built-in on the given input, writing to the given output.
-	fn execute(&self, input: &[u8], out: &mut [u8]);
+	fn execute(&self, input: &[u8], output: &mut BytesRef);
 }
 
 /// A gas pricing scheme for built-in contracts.
@@ -56,7 +57,7 @@ impl Builtin {
 	pub fn cost(&self, s: usize) -> U256 { self.pricer.cost(s) }
 
 	/// Simple forwarder for execute.
-	pub fn execute(&self, input: &[u8], output: &mut[u8]) { self.native.execute(input, output) }
+	pub fn execute(&self, input: &[u8], output: &mut BytesRef) { self.native.execute(input, output) }
 }
 
 impl From<ethjson::spec::Builtin> for Builtin {
@@ -108,14 +109,13 @@ struct Sha256;
 struct Ripemd160;
 
 impl Impl for Identity {
-	fn execute(&self, input: &[u8], output: &mut [u8]) {
-		let len = min(input.len(), output.len());
-		output[..len].copy_from_slice(&input[..len]);
+	fn execute(&self, input: &[u8], output: &mut BytesRef) {
+		output.write(0, input);
 	}
 }
 
 impl Impl for EcRecover {
-	fn execute(&self, i: &[u8], output: &mut [u8]) {
+	fn execute(&self, i: &[u8], output: &mut BytesRef) {
 		let len = min(i.len(), 128);
 		let mut input = [0; 128];
@@ -135,58 +135,34 @@ impl Impl for EcRecover {
 		if s.is_valid() {
 			if let Ok(p) = ec_recover(&s, &hash) {
 				let r = p.sha3();
-
-				let out_len = min(output.len(), 32);
-				for x in &mut output[0.. min(12, out_len)] {
-					*x = 0;
-				}
-				if out_len > 12 {
-					output[12..out_len].copy_from_slice(&r[12..out_len]);
-				}
+				output.write(0, &[0; 12]);
+				output.write(12, &r[12..r.len()]);
 			}
 		}
 	}
 }
 
 impl Impl for Sha256 {
-	fn execute(&self, input: &[u8], output: &mut [u8]) {
-		let out_len = min(output.len(), 32);
+	fn execute(&self, input: &[u8], output: &mut BytesRef) {
 		let mut sha = Sha256Digest::new();
 		sha.input(input);
-		if out_len == 32 {
-			sha.result(&mut output[0..32]);
-		} else {
-			let mut out = [0; 32];
-			sha.result(&mut out);
-			output.copy_from_slice(&out[..out_len])
-		}
+
+		let mut out = [0; 32];
+		sha.result(&mut out);
+
+		output.write(0, &out);
 	}
 }
 
 impl Impl for Ripemd160 {
-	fn execute(&self, input: &[u8], output: &mut [u8]) {
-		let out_len = min(output.len(), 32);
+	fn execute(&self, input: &[u8], output: &mut BytesRef) {
 		let mut sha = Ripemd160Digest::new();
 		sha.input(input);
-		for x in &mut output[0.. min(12, out_len)] {
-			*x = 0;
-		}
-		if out_len >= 32 {
-			sha.result(&mut output[12..32]);
-		} else if out_len > 12 {
-			let mut out = [0; 20];
-			sha.result(&mut out);
-			output.copy_from_slice(&out[12..out_len])
-		}
+
+		let mut out = [0; 32];
+		sha.result(&mut out[12..32]);
+
+		output.write(0, &out);
 	}
 }
@@ -194,7 +170,7 @@ impl Impl for Ripemd160 {
 mod tests {
 	use super::{Builtin, Linear, ethereum_builtin, Pricer};
 	use ethjson;
-	use util::U256;
+	use util::{U256, BytesRef};
 
 	#[test]
 	fn identity() {
@@ -203,15 +179,15 @@ mod tests {
 		let i = [0u8, 1, 2, 3];
 
 		let mut o2 = [255u8; 2];
-		f.execute(&i[..], &mut o2[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o2[..]));
 		assert_eq!(i[0..2], o2);
 
 		let mut o4 = [255u8; 4];
-		f.execute(&i[..], &mut o4[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o4[..]));
 		assert_eq!(i, o4);
 
 		let mut o8 = [255u8; 8];
-		f.execute(&i[..], &mut o8[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
 		assert_eq!(i, o8[..4]);
 		assert_eq!([255u8; 4], o8[4..]);
 	}
@@ -224,16 +200,20 @@ mod tests {
 		let i = [0u8; 0];
 
 		let mut o = [255u8; 32];
-		f.execute(&i[..], &mut o[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]);
 
 		let mut o8 = [255u8; 8];
-		f.execute(&i[..], &mut o8[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
 		assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]);
 
 		let mut o34 = [255u8; 34];
-		f.execute(&i[..], &mut o34[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..]));
 		assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]);
+
+		let mut ov = vec![];
+		f.execute(&i[..], &mut BytesRef::Flexible(&mut ov));
+		assert_eq!(&ov[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]);
 	}
 
 	#[test]
@@ -244,15 +224,15 @@ mod tests {
 		let i = [0u8; 0];
 
 		let mut o = [255u8; 32];
-		f.execute(&i[..], &mut o[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]);
 
 		let mut o8 = [255u8; 8];
-		f.execute(&i[..], &mut o8[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
 		assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]);
 
 		let mut o34 = [255u8; 34];
-		f.execute(&i[..], &mut o34[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..]));
 		assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]);
 	}
@@ -272,46 +252,46 @@ mod tests {
 		let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
 
 		let mut o = [255u8; 32];
-		f.execute(&i[..], &mut o[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]);
 
 		let mut o8 = [255u8; 8];
-		f.execute(&i[..], &mut o8[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
 		assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]);
 
 		let mut o34 = [255u8; 34];
-		f.execute(&i[..], &mut o34[..]);
+		f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..]));
 		assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]);
 
 		let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
 		let mut o = [255u8; 32];
-		f.execute(&i_bad[..], &mut o[..]);
+		f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
 
 		let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap();
 		let mut o = [255u8; 32];
-		f.execute(&i_bad[..], &mut o[..]);
+		f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
 
 		let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap();
 		let mut o = [255u8; 32];
-		f.execute(&i_bad[..], &mut o[..]);
+		f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
 
 		let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap();
 		let mut o = [255u8; 32];
-		f.execute(&i_bad[..], &mut o[..]);
+		f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
 
 		let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap();
 		let mut o = [255u8; 32];
-		f.execute(&i_bad[..], &mut o[..]);
+		f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
 
 		// TODO: Should this (corrupted version of the above) fail rather than returning some address?
 		/* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
 		let mut o = [255u8; 32];
-		f.execute(&i_bad[..], &mut o[..]);
+		f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/
 	}
@@ -336,7 +316,7 @@ mod tests {
 		let i = [0u8, 1, 2, 3];
 		let mut o = [255u8; 4];
-		b.execute(&i[..], &mut o[..]);
+		b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(i, o);
 	}
@@ -357,7 +337,7 @@ mod tests {
 		let i = [0u8, 1, 2, 3];
 		let mut o = [255u8; 4];
-		b.execute(&i[..], &mut o[..]);
+		b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
 		assert_eq!(i, o);
 	}
 }
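The `BytesRef` migration moves the clip-or-grow bookkeeping out of every builtin and into the output handle itself. An illustrative model of the two variants the tests exercise (the real `BytesRef` lives in Parity's util crate; this toy `OutRef` only mirrors its write semantics):

```rust
// Toy stand-in for BytesRef: a fixed caller buffer clips writes, a
// flexible Vec grows to fit them.
enum OutRef<'a> {
    Fixed(&'a mut [u8]),
    Flexible(&'a mut Vec<u8>),
}

impl<'a> OutRef<'a> {
    fn write(&mut self, offset: usize, data: &[u8]) {
        match *self {
            OutRef::Fixed(ref mut out) => {
                // copy as much as fits; builtins no longer do the min()/slice dance
                for (i, b) in data.iter().enumerate() {
                    if let Some(slot) = out.get_mut(offset + i) {
                        *slot = *b;
                    }
                }
            }
            OutRef::Flexible(ref mut out) => {
                if out.len() < offset + data.len() {
                    out.resize(offset + data.len(), 0);
                }
                out[offset..offset + data.len()].copy_from_slice(data);
            }
        }
    }
}

fn main() {
    let mut small = [0xffu8; 2];
    {
        let mut out = OutRef::Fixed(&mut small);
        out.write(0, &[1, 2, 3, 4]); // clipped, like the o2 identity test
    }
    assert_eq!(small, [1, 2]);

    let mut grown = Vec::new();
    {
        let mut out = OutRef::Flexible(&mut grown);
        out.write(0, &[1, 2, 3, 4]); // grows to fit, like the `ov` sha256 test
    }
    assert_eq!(grown, vec![1, 2, 3, 4]);
}
```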
@@ -145,7 +145,9 @@ pub struct Client {
 	factories: Factories,
 }
 
-const HISTORY: u64 = 1200;
+/// The pruning constant -- how old blocks must be before we
+/// assume finality of a given candidate.
+pub const HISTORY: u64 = 1200;
 
 /// Append a path element to the given path and return the string.
 pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
@@ -169,7 +171,7 @@ impl Client {
 		let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database)));
 
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
-		let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())));
+		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
 
 		let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
 		if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) {
@@ -674,6 +676,8 @@ impl Client {
 impl snapshot::DatabaseRestore for Client {
 	/// Restart the client with a new backend
 	fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> {
+		trace!(target: "snapshot", "Replacing client database with {:?}", new_db);
+
 		let _import_lock = self.import_lock.lock();
 		let mut state_db = self.state_db.write();
 		let mut chain = self.chain.write();
@@ -684,7 +688,7 @@ impl snapshot::DatabaseRestore for Client {
 		*state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE);
 		*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
-		*tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from));
+		*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
 		Ok(())
 	}
 }
@@ -957,9 +961,7 @@ impl BlockChainClient for Client {
 	}
 
 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
-		// TODO: lock blockchain only once
-		let mut blocks = filter.bloom_possibilities().iter()
+		let blocks = filter.bloom_possibilities().iter()
 			.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
 			.flat_map(|m| m)
 			// remove duplicate elements
@@ -967,35 +969,7 @@ impl BlockChainClient for Client {
 			.into_iter()
 			.collect::<Vec<u64>>();
 
-		blocks.sort();
-
-		let chain = self.chain.read();
-		blocks.into_iter()
-			.filter_map(|number| chain.block_hash(number).map(|hash| (number, hash)))
-			.filter_map(|(number, hash)| chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
-			.filter_map(|(number, hash, receipts)| chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
-			.flat_map(|(number, hash, receipts, hashes)| {
-				let mut log_index = 0;
-				receipts.into_iter()
-					.enumerate()
-					.flat_map(|(index, receipt)| {
-						log_index += receipt.logs.len();
-						receipt.logs.into_iter()
-							.enumerate()
-							.filter(|tuple| filter.matches(&tuple.1))
-							.map(|(i, log)| LocalizedLogEntry {
-								entry: log,
-								block_hash: hash.clone(),
-								block_number: number,
-								transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::default),
-								transaction_index: index,
-								log_index: log_index + i
-							})
-							.collect::<Vec<LocalizedLogEntry>>()
-					})
-					.collect::<Vec<LocalizedLogEntry>>()
-			})
-			.collect()
+		self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)
 	}
 
 	fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
@@ -18,7 +18,7 @@ use std::str::FromStr;
 pub use std::time::Duration;
 pub use block_queue::BlockQueueConfig;
 pub use blockchain::Config as BlockChainConfig;
-pub use trace::{Config as TraceConfig, Switch};
+pub use trace::Config as TraceConfig;
 pub use evm::VMType;
 pub use verification::VerifierType;
 use util::{journaldb, CompactionProfile};
@@ -23,7 +23,7 @@ mod trace;
 mod client;
 
 pub use self::client::*;
-pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
+pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType};
 pub use self::error::Error;
 pub use types::ids::*;
 pub use self::test_client::{TestBlockChainClient, EachBlockWith};
@@ -67,6 +67,8 @@ pub struct TestBlockChainClient {
 	pub execution_result: RwLock<Option<Result<Executed, CallError>>>,
 	/// Transaction receipts.
 	pub receipts: RwLock<HashMap<TransactionID, LocalizedReceipt>>,
+	/// Logs
+	pub logs: RwLock<Vec<LocalizedLogEntry>>,
 	/// Block queue size.
 	pub queue_size: AtomicUsize,
 	/// Miner
@@ -114,6 +116,7 @@ impl TestBlockChainClient {
 			code: RwLock::new(HashMap::new()),
 			execution_result: RwLock::new(None),
 			receipts: RwLock::new(HashMap::new()),
+			logs: RwLock::new(Vec::new()),
 			queue_size: AtomicUsize::new(0),
 			miner: Arc::new(Miner::with_spec(&spec)),
 			spec: spec,
@@ -165,6 +168,11 @@ impl TestBlockChainClient {
 		*self.latest_block_timestamp.write() = ts;
 	}
 
+	/// Set logs to return for each logs call.
+	pub fn set_logs(&self, logs: Vec<LocalizedLogEntry>) {
+		*self.logs.write() = logs;
+	}
+
 	/// Add blocks to test client.
 	pub fn add_blocks(&self, count: usize, with: EachBlockWith) {
 		let len = self.numbers.read().len();
@@ -390,8 +398,13 @@ impl BlockChainClient for TestBlockChainClient {
 		unimplemented!();
 	}
 
-	fn logs(&self, _filter: Filter) -> Vec<LocalizedLogEntry> {
-		unimplemented!();
+	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
+		let mut logs = self.logs.read().clone();
+		let len = logs.len();
+		match filter.limit {
+			Some(limit) if limit <= len => logs.split_off(len - limit),
+			_ => logs,
+		}
 	}
 
 	fn last_hashes(&self) -> LastHashes {
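A filter limit on the test client keeps the tail of the stored list, i.e. the most recent entries, via `Vec::split_off`. The idiom in isolation:

```rust
// split_off(len - limit) truncates `logs` in place and returns the final
// `limit` elements, matching TestBlockChainClient::logs above.
fn apply_limit<T>(mut logs: Vec<T>, limit: Option<usize>) -> Vec<T> {
    let len = logs.len();
    match limit {
        Some(limit) if limit <= len => logs.split_off(len - limit),
        _ => logs,
    }
}

fn main() {
    assert_eq!(apply_limit(vec![1, 2, 3, 4], Some(2)), vec![3, 4]);
    assert_eq!(apply_limit(vec![1, 2, 3, 4], None), vec![1, 2, 3, 4]);
    assert_eq!(apply_limit(Vec::<u32>::new(), Some(5)), Vec::<u32>::new());
}
```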
@@ -218,8 +218,11 @@ pub trait BlockChainClient : Sync + Send {
 
 /// Extended client interface used for mining
 pub trait MiningBlockChainClient : BlockChainClient {
 	/// Returns OpenBlock prepared for closing.
-	fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes)
-		-> OpenBlock;
+	fn prepare_open_block(&self,
+		author: Address,
+		gas_range_target: (U256, U256),
+		extra_data: Bytes
+	) -> OpenBlock;
 
 	/// Returns EvmFactory.
 	fn vm_factory(&self) -> &EvmFactory;
@@ -99,6 +99,10 @@ impl Engine for BasicAuthority {
 	/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
 	fn on_close_block(&self, _block: &mut ExecutedBlock) {}
 
+	fn is_sealer(&self, author: &Address) -> Option<bool> {
+		Some(self.our_params.authorities.contains(author))
+	}
+
 	/// Attempt to seal the block internally.
 	///
 	/// This operation is synchronous and may (quite reasonably) not be available, in which `false` will
@@ -257,4 +261,14 @@ mod tests {
 		let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
 		assert!(b.try_seal(engine, seal).is_ok());
 	}
+
+	#[test]
+	fn seals_internally() {
+		let tap = AccountProvider::transient_provider();
+		let authority = tap.insert_account("".sha3(), "").unwrap();
+
+		let engine = new_test_authority().engine;
+		assert!(!engine.is_sealer(&Address::default()).unwrap());
+		assert!(engine.is_sealer(&authority).unwrap());
+	}
 }
@@ -58,6 +58,8 @@ impl Engine for InstantSeal {
 		Schedule::new_homestead()
 	}
 
+	fn is_sealer(&self, _author: &Address) -> Option<bool> { Some(true) }
+
 	fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
 		Some(Vec::new())
 	}
@@ -71,18 +73,12 @@ mod tests {
 	use spec::Spec;
 	use block::*;
 
-	/// Create a new test chain spec with `BasicAuthority` consensus engine.
-	fn new_test_instant() -> Spec {
-		let bytes: &[u8] = include_bytes!("../../res/instant_seal.json");
-		Spec::load(bytes).expect("invalid chain spec")
-	}
-
 	#[test]
 	fn instant_can_seal() {
 		let tap = AccountProvider::transient_provider();
 		let addr = tap.insert_account("".sha3(), "").unwrap();
 
-		let spec = new_test_instant();
+		let spec = Spec::new_test_instant();
 		let engine = &*spec.engine;
 		let genesis_header = spec.genesis_header();
 		let mut db_result = get_temp_journal_db();
@@ -98,7 +94,7 @@ mod tests {
 
 	#[test]
 	fn instant_cant_verify() {
-		let engine = new_test_instant().engine;
+		let engine = Spec::new_test_instant().engine;
 		let mut header: Header = Header::default();
 
 		assert!(engine.verify_block_basic(&header, None).is_ok());
@@ -30,7 +30,7 @@ pub use self::tendermint::Tendermint;
 pub use self::signed_vote::SignedVote;
 pub use self::propose_collect::ProposeCollect;
 
-use common::{HashMap, SemanticVersion, Header, EnvInfo, Address, Builtin, BTreeMap, U256, Bytes, SignedTransaction, Error, H520};
+use common::*;
 use rlp::UntrustedRlp;
 use account_provider::AccountProvider;
 use block::ExecutedBlock;
@@ -95,6 +95,11 @@ pub trait Engine : Sync + Send {
 	/// Block transformation functions, after the transactions.
 	fn on_close_block(&self, _block: &mut ExecutedBlock) {}
 
+	/// If Some(true) this author is able to generate seals, generate_seal has to be implemented.
+	/// None indicates that this Engine never seals internally regardless of author (e.g. PoW).
+	fn is_sealer(&self, _author: &Address) -> Option<bool> { None }
+	/// Checks if default address is able to seal.
+	fn is_default_sealer(&self) -> Option<bool> { self.is_sealer(&Default::default()) }
 	/// Attempt to seal the block internally.
 	///
 	/// If `Some` is returned, then you get a valid seal.
@@ -149,7 +154,7 @@ pub trait Engine : Sync + Send {
 	fn cost_of_builtin(&self, a: &Address, input: &[u8]) -> U256 { self.builtins().get(a).unwrap().cost(input.len()) }
 	/// Execution the builtin contract `a` on `input` and return `output`.
 	/// Panics if `is_builtin(a)` is not true.
-	fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut [u8]) { self.builtins().get(a).unwrap().execute(input, output); }
+	fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut BytesRef) { self.builtins().get(a).unwrap().execute(input, output); }
 
 	// TODO: sealing stuff - though might want to leave this for later.
 }
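`is_sealer` is deliberately three-valued: `None` means the engine never seals internally (proof of work), `Some(false)` means it can seal but not for this author, and `Some(true)` permits `generate_seal`. A hedged sketch of that contract using a stand-in trait rather than the real `Engine`:

```rust
// Stand-in trait; only the Option<bool> contract is the point here.
trait Sealer {
    fn is_sealer(&self, author: &str) -> Option<bool>;
}

struct Pow; // never seals internally, like a PoW engine
impl Sealer for Pow {
    fn is_sealer(&self, _author: &str) -> Option<bool> { None }
}

struct Authority { authorities: Vec<String> } // seals for listed authors only
impl Sealer for Authority {
    fn is_sealer(&self, author: &str) -> Option<bool> {
        Some(self.authorities.iter().any(|a| a == author))
    }
}

fn main() {
    let auth = Authority { authorities: vec!["alice".into()] };
    assert_eq!(Pow.is_sealer("alice"), None);
    assert_eq!(auth.is_sealer("bob"), Some(false));
    assert_eq!(auth.is_sealer("alice"), Some(true));
}
```

`is_default_sealer` then lets the miner decide, before any author is configured, whether sealing work should be enabled at all.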
@@ -160,16 +160,14 @@ impl Engine for Ethash {
 		let fields = block.fields_mut();
 
 		// Bestow block reward
-		fields.state.add_balance(&fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())));
+		fields.state.add_balance(fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())));
 
 		// Bestow uncle rewards
 		let current_number = fields.header.number();
 		for u in fields.uncles.iter() {
 			fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)));
 		}
-		if let Err(e) = fields.state.commit() {
-			warn!("Encountered error on state commit: {}", e);
-		}
 	}
 
 	fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
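With the commit moved out of `on_close_block` (it now happens once in `ClosedBlock::lock`), the hook is pure reward arithmetic: the author receives the base reward plus reward/32 per included uncle, and each uncle author receives `reward * (8 + uncle_number - block_number) / 8`. A worked example in `u128` instead of `U256`, using the Frontier-era 5-ether base reward:

```rust
fn main() {
    // Frontier-era base reward: 5 ether, expressed in wei.
    let reward: u128 = 5_000_000_000_000_000_000;
    let block_number: u128 = 100;
    let uncle_numbers = [99u128, 98]; // two included uncles

    // Block author: base reward plus reward/32 for each included uncle.
    let miner_reward = reward + reward / 32 * uncle_numbers.len() as u128;
    assert_eq!(miner_reward, 5_312_500_000_000_000_000);

    // Each uncle author: reward * (8 + uncle_number - block_number) / 8,
    // i.e. 7/8 of the base reward one generation back, 6/8 two back, ...
    for &u in &uncle_numbers {
        let uncle_reward = reward * (8 + u - block_number) / 8;
        println!("uncle mined at {}: {} wei", u, uncle_reward);
    }
}
```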
@@ -113,7 +113,10 @@ impl<'a> Finalize for Result<GasLeft<'a>> {
 }
 
 /// Cost calculation type. For low-gas usage we calculate costs using usize instead of U256
-pub trait CostType: ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Output=Self> + ops::Sub<Output=Self> + ops::Shr<usize, Output=Self> + ops::Shl<usize, Output=Self> + cmp::Ord + Sized + From<usize> + Copy {
+pub trait CostType: Sized + From<usize> + Copy
+	+ ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Output=Self> + ops::Sub<Output=Self>
+	+ ops::Shr<usize, Output=Self> + ops::Shl<usize, Output=Self>
+	+ cmp::Ord + fmt::Debug {
 	/// Converts this cost into `U256`
 	fn as_u256(&self) -> U256;
 	/// Tries to fit `U256` into this `Cost` type
@@ -83,6 +83,9 @@ pub trait Ext {
 	/// Returns code at given address
 	fn extcode(&self, address: &Address) -> Bytes;
 
+	/// Returns code size at given address
+	fn extcodesize(&self, address: &Address) -> usize;
+
 	/// Creates log entry with given topics and data
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]);
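A dedicated size query spares EXTCODESIZE from materialising a full copy of the contract code just to read its length. A toy illustration of the difference (stand-in types, not the real `Ext` implementations):

```rust
use std::collections::HashMap;

// Toy backend standing in for the state-backed Ext implementations.
struct Backend { codes: HashMap<&'static str, Vec<u8>> }

impl Backend {
    fn extcode(&self, addr: &str) -> Vec<u8> {
        // full copy of possibly kilobytes of contract code
        self.codes.get(addr).cloned().unwrap_or_default()
    }
    fn extcodesize(&self, addr: &str) -> usize {
        // just the length: no allocation, no copy
        self.codes.get(addr).map(|c| c.len()).unwrap_or(0)
    }
}

fn main() {
    let mut codes = HashMap::new();
    codes.insert("contract", vec![0u8; 24 * 1024]);
    let b = Backend { codes };
    assert_eq!(b.extcode("contract").len(), b.extcodesize("contract"));
    assert_eq!(b.extcodesize("missing"), 0);
}
```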
@@ -53,6 +53,17 @@ fn color(instruction: Instruction, name: &'static str) -> String {
 type CodePosition = usize;
 type ProgramCounter = usize;
 
+const ONE: U256 = U256([1, 0, 0, 0]);
+const TWO: U256 = U256([2, 0, 0, 0]);
+const TWO_POW_5: U256 = U256([0x20, 0, 0, 0]);
+const TWO_POW_8: U256 = U256([0x100, 0, 0, 0]);
+const TWO_POW_16: U256 = U256([0x10000, 0, 0, 0]);
+const TWO_POW_24: U256 = U256([0x1000000, 0, 0, 0]);
+const TWO_POW_64: U256 = U256([0, 0x1, 0, 0]); // 0x1 00000000 00000000
+const TWO_POW_96: U256 = U256([0, 0x100000000, 0, 0]); //0x1 00000000 00000000 00000000
+const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 000000
+
 /// Abstraction over raw vector of Bytes. Easier state management of PC.
 struct CodeReader<'a> {
 	position: ProgramCounter,
@@ -126,7 +137,7 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 			gasometer.current_gas = gasometer.current_gas - gas_cost;
 
 			evm_debug!({
-				println!("[0x{:x}][{}(0x{:x}) Gas: {:x}\n  Gas Before: {:x}",
+				println!("[0x{:x}][{}(0x{:x}) Gas: {:?}\n  Gas Before: {:?}",
					reader.position,
					color(instruction, info.name),
					instruction,
@@ -471,7 +482,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 			},
 			instructions::EXTCODESIZE => {
 				let address = u256_to_address(&stack.pop_back());
-				let len = ext.extcode(&address).len();
+				let len = ext.extcodesize(&address);
 				stack.push(U256::from(len));
 			},
 			instructions::CALLDATACOPY => {
@@ -599,7 +610,19 @@ impl<Cost: CostType> Interpreter<Cost> {
 				let a = stack.pop_back();
 				let b = stack.pop_back();
 				stack.push(if !self.is_zero(&b) {
-					a.overflowing_div(b).0
+					match b {
+						ONE => a,
+						TWO => a >> 1,
+						TWO_POW_5 => a >> 5,
+						TWO_POW_8 => a >> 8,
+						TWO_POW_16 => a >> 16,
+						TWO_POW_24 => a >> 24,
+						TWO_POW_64 => a >> 64,
+						TWO_POW_96 => a >> 96,
+						TWO_POW_224 => a >> 224,
+						TWO_POW_248 => a >> 248,
+						_ => a.overflowing_div(b).0,
+					}
 				} else {
 					U256::zero()
 				});
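The DIV fast path relies on division by 2^k being a right shift by k, which skips full 256-bit long division for the divisors compilers emit most often. A quick property check on `u64` (the interpreter applies the same identity to `U256`):

```rust
// Spot-check a / 2^k == a >> k for the powers the interpreter special-cases.
fn main() {
    let a: u64 = 0xdead_beef_cafe_f00d;
    for &(divisor, shift) in &[(2u64, 1u32), (32, 5), (256, 8), (65_536, 16)] {
        assert_eq!(a / divisor, a >> shift);
    }
    println!("a / 2^k == a >> k holds for all sampled divisors");
}
```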
@@ -18,6 +18,7 @@
 use common::*;
 use evmjit;
 use evm::{self, GasLeft};
+use types::executed::CallType;
 
 /// Should be used to convert jit types to ethcore
 trait FromJit<T>: Sized {
@@ -77,10 +78,11 @@ impl IntoJit<evmjit::I256> for U256 {
 impl IntoJit<evmjit::I256> for H256 {
 	fn into_jit(self) -> evmjit::I256 {
 		let mut ret = [0; 4];
-		for i in 0..self.bytes().len() {
-			let rev = self.bytes().len() - 1 - i;
+		let len = self.len();
+		for i in 0..len {
+			let rev = len - 1 - i;
 			let pos = rev / 8;
-			ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8);
+			ret[pos] += (self[i] as u64) << ((rev % 8) * 8);
 		}
 		evmjit::I256 { words: ret }
 	}
@@ -206,6 +208,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
 		let sender_address = unsafe { Address::from_jit(&*sender_address) };
 		let receive_address = unsafe { Address::from_jit(&*receive_address) };
 		let code_address = unsafe { Address::from_jit(&*code_address) };
+		// TODO Is it always safe in case of DELEGATE_CALL?
 		let transfer_value = unsafe { U256::from_jit(&*transfer_value) };
 		let value = Some(transfer_value);
@@ -239,6 +242,12 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
 			}
 		}
 
+		// TODO [ToDr] Any way to detect DelegateCall?
+		let call_type = match is_callcode {
+			true => CallType::CallCode,
+			false => CallType::Call,
+		};
+
 		match self.ext.call(
 			&call_gas,
 			&sender_address,
@@ -246,7 +255,9 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
 			value,
 			unsafe { slice::from_raw_parts(in_beg, in_size as usize) },
 			&code_address,
-			unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) {
+			unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) },
+			call_type,
+		) {
 			evm::MessageCallResult::Success(gas_left) => unsafe {
 				*io_gas = (gas + gas_left).low_u64();
 				true
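`into_jit` folds a 32-byte big-endian hash into four little-endian `u64` words. The same loop extracted into a standalone function, with plain arrays standing in for `H256` and `I256`:

```rust
// Pack a big-endian 32-byte value into four u64 words, least significant
// word first, exactly as the loop in into_jit above does.
fn into_words(bytes: &[u8; 32]) -> [u64; 4] {
    let mut ret = [0u64; 4];
    let len = bytes.len();
    for i in 0..len {
        let rev = len - 1 - i;  // distance from the least significant byte
        let pos = rev / 8;      // which 64-bit word receives it
        ret[pos] += (bytes[i] as u64) << ((rev % 8) * 8);
    }
    ret
}

fn main() {
    let mut h = [0u8; 32];
    h[31] = 0x01; // least significant byte of the hash
    h[0] = 0xff;  // most significant byte
    let words = into_words(&h);
    assert_eq!(words[0], 1);             // last byte lands in the low word
    assert_eq!(words[3], 0xffu64 << 56); // first byte tops the high word
    println!("{:x?}", words);
}
```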
@@ -140,6 +140,10 @@ impl Ext for FakeExt {
 		self.codes.get(address).unwrap_or(&Bytes::new()).clone()
 	}
 
+	fn extcodesize(&self, address: &Address) -> usize {
+		self.codes.get(address).map(|v| v.len()).unwrap_or(0)
+	}
+
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
 		self.logs.push(FakeLogEntry {
 			topics: topics,
@@ -193,7 +193,6 @@ impl<'a> Executive<'a> {
 					data: Some(t.data.clone()),
 					call_type: CallType::Call,
 				};
-				// TODO: move output upstream
 				let mut out = vec![];
 				(self.call(params, &mut substate, BytesRef::Flexible(&mut out), &mut tracer, &mut vm_tracer), out)
 			}
@@ -205,6 +205,11 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
 		self.state.code(address).unwrap_or_else(|| vec![])
 	}
 
+	fn extcodesize(&self, address: &Address) -> usize {
+		self.state.code_size(address).unwrap_or(0)
+	}
+
 	#[cfg_attr(feature="dev", allow(match_ref_pats))]
 	fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result<U256>
 		where Self: Sized {
@@ -131,6 +131,10 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer {
 		self.ext.extcode(address)
 	}
 
+	fn extcodesize(&self, address: &Address) -> usize {
+		self.ext.extcodesize(address)
+	}
+
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
 		self.ext.log(topics, data)
 	}
@ -24,7 +24,7 @@ use views::{BlockView, HeaderView};
use state::State; use state::State;
use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics}; use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics};
use executive::contract_address; use executive::contract_address;
use block::{ClosedBlock, IsBlock, Block}; use block::{ClosedBlock, SealedBlock, IsBlock, Block};
use error::*; use error::*;
use transaction::{Action, SignedTransaction}; use transaction::{Action, SignedTransaction};
use receipt::{Receipt, RichReceipt}; use receipt::{Receipt, RichReceipt};
@ -34,6 +34,7 @@ use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, Transac
use miner::work_notify::WorkPoster; use miner::work_notify::WorkPoster;
use client::TransactionImportResult; use client::TransactionImportResult;
use miner::price_info::PriceInfo; use miner::price_info::PriceInfo;
use header::BlockNumber;
/// Different possible definitions for pending transaction set. /// Different possible definitions for pending transaction set.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -165,6 +166,7 @@ struct SealingWork {
} }
/// Keeps track of transactions using priority queue and holds currently mined block. /// Keeps track of transactions using priority queue and holds currently mined block.
/// Handles preparing work for "work sealing" or seals "internally" if Engine does not require work.
pub struct Miner { pub struct Miner {
// NOTE [ToDr] When locking always lock in this order! // NOTE [ToDr] When locking always lock in this order!
transaction_queue: Arc<Mutex<TransactionQueue>>, transaction_queue: Arc<Mutex<TransactionQueue>>,
@ -173,6 +175,7 @@ pub struct Miner {
sealing_block_last_request: Mutex<u64>, sealing_block_last_request: Mutex<u64>,
// for sealing... // for sealing...
options: MinerOptions, options: MinerOptions,
seals_internally: bool,
gas_range_target: RwLock<(U256, U256)>, gas_range_target: RwLock<(U256, U256)>,
author: RwLock<Address>, author: RwLock<Address>,
@ -185,33 +188,24 @@ pub struct Miner {
} }
impl Miner { impl Miner {
/// Creates new instance of miner without accounts, but with given spec. /// Creates new instance of miner.
pub fn with_spec(spec: &Spec) -> Miner { fn new_raw(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Miner {
Miner { let work_poster = match options.new_work_notify.is_empty() {
transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())), true => None,
options: Default::default(), false => Some(WorkPoster::new(&options.new_work_notify))
next_allowed_reseal: Mutex::new(Instant::now()), };
sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(20), enabled: false}),
gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()),
accounts: None,
engine: spec.engine.clone(),
work_poster: None,
gas_pricer: Mutex::new(GasPricer::new_fixed(20_000_000_000u64.into())),
}
}
/// Creates new instance of miner
pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
let work_poster = if !options.new_work_notify.is_empty() { Some(WorkPoster::new(&options.new_work_notify)) } else { None };
let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit))); let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)));
Arc::new(Miner { Miner {
transaction_queue: txq, transaction_queue: txq,
next_allowed_reseal: Mutex::new(Instant::now()), next_allowed_reseal: Mutex::new(Instant::now()),
sealing_block_last_request: Mutex::new(0), sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(options.work_queue_size), enabled: options.force_sealing || !options.new_work_notify.is_empty()}), sealing_work: Mutex::new(SealingWork{
queue: UsingQueue::new(options.work_queue_size),
enabled: options.force_sealing
|| !options.new_work_notify.is_empty()
|| spec.engine.is_default_sealer().unwrap_or(false)
}),
seals_internally: spec.engine.is_default_sealer().is_some(),
gas_range_target: RwLock::new((U256::zero(), U256::zero())), gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(Address::default()), author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()), extra_data: RwLock::new(Vec::new()),
@ -220,7 +214,17 @@ impl Miner {
engine: spec.engine.clone(), engine: spec.engine.clone(),
work_poster: work_poster, work_poster: work_poster,
gas_pricer: Mutex::new(gas_pricer), gas_pricer: Mutex::new(gas_pricer),
}) }
}
/// Creates new instance of miner without accounts, but with given spec.
pub fn with_spec(spec: &Spec) -> Miner {
Miner::new_raw(Default::default(), GasPricer::new_fixed(20_000_000_000u64.into()), spec, None)
}
/// Creates new instance of a miner Arc.
pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
Arc::new(Miner::new_raw(options, gas_pricer, spec, accounts))
} }
fn forced_sealing(&self) -> bool { fn forced_sealing(&self) -> bool {
@ -242,20 +246,17 @@ impl Miner {
self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone()) self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone())
} }
/// Prepares new block for sealing including top transactions from queue.
#[cfg_attr(feature="dev", allow(match_same_arms))] #[cfg_attr(feature="dev", allow(match_same_arms))]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))] /// Prepares new block for sealing including top transactions from queue.
fn prepare_sealing(&self, chain: &MiningBlockChainClient) { fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option<H256>) {
trace!(target: "miner", "prepare_sealing: entering");
{ {
trace!(target: "miner", "recalibrating..."); trace!(target: "miner", "prepare_block: recalibrating...");
let txq = self.transaction_queue.clone(); let txq = self.transaction_queue.clone();
self.gas_pricer.lock().recalibrate(move |price| { self.gas_pricer.lock().recalibrate(move |price| {
trace!(target: "miner", "Got gas price! {}", price); trace!(target: "miner", "prepare_block: Got gas price! {}", price);
txq.lock().set_minimal_gas_price(price); txq.lock().set_minimal_gas_price(price);
}); });
trace!(target: "miner", "done recalibration."); trace!(target: "miner", "prepare_block: done recalibration.");
} }
let (transactions, mut open_block, original_work_hash) = { let (transactions, mut open_block, original_work_hash) = {
@ -273,13 +274,13 @@ impl Miner {
*/
let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "prepare_block: Already have previous work; updating and returning");
// add transactions to old_block
old_block.reopen(&*self.engine)
}
None => {
// block not found - create it.
trace!(target: "miner", "prepare_block: No existing work - making new block");
chain.prepare_open_block(
self.author(),
(self.gas_floor_target(), self.gas_ceil_target()),
@ -291,6 +292,7 @@ impl Miner {
};
let mut invalid_transactions = HashSet::new();
let mut transactions_to_penalize = HashSet::new();
let block_number = open_block.block().fields().header.number();
// TODO: push new uncles, too.
for tx in transactions {
@ -298,6 +300,12 @@ impl Miner {
match open_block.push_transaction(tx, None) {
Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas })) => {
debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas);
// Penalize transaction if it's above current gas limit
if gas > gas_limit {
transactions_to_penalize.insert(hash);
}
// Exit early if gas left is smaller than min_tx_gas
let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly.
if gas_limit - gas_used < min_tx_gas {
@ -333,38 +341,83 @@ impl Miner {
for hash in invalid_transactions.into_iter() {
queue.remove_invalid(&hash, &fetch_account);
}
for hash in transactions_to_penalize {
queue.penalize(&hash);
}
}
(block, original_work_hash)
}
/// Check if reseal is allowed and necessary.
fn requires_reseal(&self, best_block: BlockNumber) -> bool {
let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions();
let mut sealing_work = self.sealing_work.lock();
if sealing_work.enabled {
trace!(target: "miner", "requires_reseal: sealing enabled");
let last_request = *self.sealing_block_last_request.lock();
let should_disable_sealing = !self.forced_sealing()
&& !has_local_transactions
&& best_block > last_request
&& best_block - last_request > SEALING_TIMEOUT_IN_BLOCKS;
trace!(target: "miner", "requires_reseal: should_disable_sealing={}; best_block={}, last_request={}", should_disable_sealing, best_block, last_request);
if should_disable_sealing {
trace!(target: "miner", "Miner sleeping (current {}, last {})", best_block, last_request);
sealing_work.enabled = false;
sealing_work.queue.reset();
false
} else {
// sealing enabled and we don't want to sleep.
*self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period;
true
}
} else {
trace!(target: "miner", "requires_reseal: sealing is disabled");
false
}
}
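// Editor's sketch (not in the original diff): the sleep condition used by
// requires_reseal above, in miniature, with u64 standing in for BlockNumber
// and an assumed timeout of 5 blocks (the real SEALING_TIMEOUT_IN_BLOCKS is
// defined elsewhere in this file).
fn should_disable_sealing_sketch(forced: bool, has_local: bool, best_block: u64, last_request: u64) -> bool {
	const SEALING_TIMEOUT_IN_BLOCKS: u64 = 5; // assumed value for the sketch
	!forced && !has_local
		&& best_block > last_request
		&& best_block - last_request > SEALING_TIMEOUT_IN_BLOCKS
}
// e.g. should_disable_sealing_sketch(false, false, 10, 3) == true: the last
// work request was at block 3, the chain is at block 10, so the miner sleeps.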
/// Attempts to perform internal sealing (one that does not require work) to return Ok(sealed);
/// Err(Some(block)) is returned for unsuccessful sealing, while Err(None) indicates a misspecified engine.
fn seal_block_internally(&self, block: ClosedBlock) -> Result<SealedBlock, Option<ClosedBlock>> {
trace!(target: "miner", "seal_block_internally: block has transaction - attempting internal seal.");
let s = self.engine.generate_seal(block.block(), match self.accounts {
Some(ref x) => Some(&**x),
None => None,
});
if let Some(seal) = s {
trace!(target: "miner", "seal_block_internally: managed internal seal. importing...");
block.lock().try_seal(&*self.engine, seal).or_else(|_| {
warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?");
Err(None)
})
} else {
trace!(target: "miner", "seal_block_internally: unable to generate seal internally");
Err(Some(block))
}
}
/// Uses Engine to seal the block internally and then imports it to chain.
fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool {
if !block.transactions().is_empty() {
if let Ok(sealed) = self.seal_block_internally(block) {
if chain.import_block(sealed.rlp_bytes()).is_ok() {
return true
}
}
}
false
}
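// Editor's sketch (not in the original diff): the three-way contract of
// seal_block_internally in miniature; String stands in for the real
// SealedBlock/ClosedBlock types.
fn handle_seal_sketch(result: Result<String, Option<String>>) -> &'static str {
	match result {
		Ok(_sealed) => "import the sealed block",
		Err(Some(_closed)) => "engine could not seal; keep the closed block",
		Err(None) => "misspecified engine; internal sealing unsupported",
	}
}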
/// Prepares work which has to be done to seal.
fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option<H256>) {
let (work, is_new) = {
let mut sealing_work = self.sealing_work.lock();
let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
trace!(target: "miner", "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash());
let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) {
trace!(target: "miner", "prepare_work: Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
let pow_hash = block.block().fields().header.hash();
let number = block.block().fields().header.number();
let difficulty = *block.block().fields().header.difficulty();
@ -378,7 +431,7 @@ impl Miner {
} else {
(None, false)
};
trace!(target: "miner", "prepare_work: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash()));
(work, is_new)
};
if is_new {
@ -392,13 +445,13 @@ impl Miner {
queue.set_gas_limit(gas_limit);
}
/// Returns true if we had to prepare new pending block.
fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool {
trace!(target: "miner", "prepare_work_sealing: entering");
let prepare_new = {
let mut sealing_work = self.sealing_work.lock();
let have_work = sealing_work.queue.peek_last_ref().is_some();
trace!(target: "miner", "prepare_work_sealing: have_work={}", have_work);
if !have_work {
sealing_work.enabled = true;
true
@ -411,12 +464,13 @@ impl Miner {
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
let (block, original_work_hash) = self.prepare_block(chain);
self.prepare_work(block, original_work_hash);
}
let mut sealing_block_last_request = self.sealing_block_last_request.lock();
let best_number = chain.chain_info().best_block_number;
if *sealing_block_last_request != best_number {
trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
*sealing_block_last_request = best_number;
}
@ -537,6 +591,10 @@ impl MinerService for Miner {
}
fn set_author(&self, author: Address) {
if self.seals_internally {
let mut sealing_work = self.sealing_work.lock();
sealing_work.enabled = self.engine.is_sealer(&author).unwrap_or(false);
}
*self.author.write() = author;
}
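// Editor's sketch (not in the original diff): for an engine that seals
// internally, changing the author now toggles sealing based on whether the
// engine authorizes that address. Assuming `authorized` is an address the
// engine's is_sealer accepts:
//
// miner.set_author(authorized); // sealing_work.enabled becomes true
// miner.set_author(Address::default()); // disabled unless it is a sealer too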
@ -625,6 +683,7 @@ impl MinerService for Miner {
results
}
#[cfg_attr(feature="dev", allow(collapsible_if))]
fn import_own_transaction(
&self,
chain: &MiningBlockChainClient,
@ -635,7 +694,7 @@ impl MinerService for Miner {
trace!(target: "own_tx", "Importing transaction: {:?}", transaction); trace!(target: "own_tx", "Importing transaction: {:?}", transaction);
let imported = { let imported = {
// Be sure to release the lock before we call enable_and_prepare_sealing // Be sure to release the lock before we call prepare_work_sealing
let mut transaction_queue = self.transaction_queue.lock(); let mut transaction_queue = self.transaction_queue.lock();
let import = self.add_transactions_to_queue( let import = self.add_transactions_to_queue(
chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue
@ -661,11 +720,11 @@ impl MinerService for Miner {
// --------------------------------------------------------------------------
if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() {
// Make sure to do it after transaction is imported and lock is dropped.
// We need to create pending block and enable sealing.
if self.seals_internally || !self.prepare_work_sealing(chain) {
// If new block has not been prepared (means we already had one)
// or Engine might be able to seal internally,
// we need to update sealing.
self.update_sealing(chain);
}
}
@ -767,44 +826,26 @@ impl MinerService for Miner {
self.transaction_queue.lock().last_nonce(address)
}
/// Update sealing if required.
/// Prepare the block and work if the Engine does not seal internally.
fn update_sealing(&self, chain: &MiningBlockChainClient) {
trace!(target: "miner", "update_sealing");
if self.requires_reseal(chain.chain_info().best_block_number) {
// --------------------------------------------------------------------------
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
trace!(target: "miner", "update_sealing: preparing a block");
let (block, original_work_hash) = self.prepare_block(chain);
if self.seals_internally {
trace!(target: "miner", "update_sealing: engine indicates internal sealing");
self.seal_and_import_block_internally(chain, block);
} else {
trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work");
self.prepare_work(block, original_work_hash);
}
}
}
@ -814,7 +855,7 @@ impl MinerService for Miner {
fn map_sealing_work<F, T>(&self, chain: &MiningBlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
trace!(target: "miner", "map_sealing_work: entering");
self.prepare_work_sealing(chain);
trace!(target: "miner", "map_sealing_work: sealing prepared");
let mut sealing_work = self.sealing_work.lock();
let ret = sealing_work.queue.use_last_ref();
@ -917,11 +958,12 @@ mod tests {
use super::*;
use util::*;
use ethkey::{Generator, Random};
use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult};
use header::BlockNumber;
use types::transaction::{Transaction, SignedTransaction, Action};
use block::*;
use spec::Spec;
use tests::helpers::{generate_dummy_client};
#[test]
fn should_prepare_block_to_seal() {
@ -975,12 +1017,7 @@ mod tests {
)).ok().expect("Miner was just created.")
}
fn transaction() -> SignedTransaction {
let keypair = Random.generate().unwrap();
Transaction {
action: Action::Create,
@ -990,8 +1027,14 @@ mod tests {
gas_price: U256::zero(),
nonce: U256::zero(),
}.sign(keypair.secret())
}
#[test]
fn should_make_pending_block_when_importing_own_transaction() {
// given
let client = TestBlockChainClient::default();
let miner = miner();
let transaction = transaction();
// when
let res = miner.import_own_transaction(&client, transaction);
@ -1002,7 +1045,7 @@ mod tests {
assert_eq!(miner.pending_transactions_hashes().len(), 1);
assert_eq!(miner.pending_receipts().len(), 1);
// This method will let us know if pending block was created (before calling that method)
assert!(!miner.prepare_work_sealing(&client));
}
#[test]
@ -1010,18 +1053,7 @@ mod tests {
// given
let client = TestBlockChainClient::default();
let miner = miner();
let transaction = transaction();
// when
let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap();
@ -1032,6 +1064,41 @@ mod tests {
assert_eq!(miner.pending_transactions().len(), 0);
assert_eq!(miner.pending_receipts().len(), 0);
// This method will let us know if pending block was created (before calling that method)
assert!(miner.prepare_work_sealing(&client));
}
#[test]
fn should_not_seal_unless_enabled() {
let miner = miner();
let client = TestBlockChainClient::default();
// By default resealing is not required.
assert!(!miner.requires_reseal(1u8.into()));
miner.import_external_transactions(&client, vec![transaction()]).pop().unwrap().unwrap();
assert!(miner.prepare_work_sealing(&client));
// Unless asked to prepare work.
assert!(miner.requires_reseal(1u8.into()));
}
#[test]
fn internal_seals_without_work() {
let miner = Miner::with_spec(&Spec::new_test_instant());
let c = generate_dummy_client(2);
let client = c.reference().as_ref();
assert_eq!(miner.import_external_transactions(client, vec![transaction()]).pop().unwrap().unwrap(), TransactionImportResult::Current);
miner.update_sealing(client);
client.flush_queue();
assert!(miner.pending_block().is_none());
assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber);
assert_eq!(miner.import_own_transaction(client, transaction()).unwrap(), TransactionImportResult::Current);
miner.update_sealing(client);
client.flush_queue();
assert!(miner.pending_block().is_none());
assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber);
}
}
View File
@ -81,6 +81,7 @@
//! - It removes all transactions (either from `current` or `future`) with nonce < client nonce
//! - It moves matching `future` transactions to `current`
use std::ops::Deref;
use std::cmp::Ordering;
use std::cmp;
use std::collections::{HashSet, HashMap, BTreeSet, BTreeMap};
@ -133,6 +134,8 @@ struct TransactionOrder {
hash: H256,
/// Origin of the transaction
origin: TransactionOrigin,
/// Penalties
penalties: usize,
}
@ -143,6 +146,7 @@ impl TransactionOrder {
gas_price: tx.transaction.gas_price,
hash: tx.hash(),
origin: tx.origin,
penalties: 0,
}
}
@ -150,6 +154,11 @@ impl TransactionOrder {
self.nonce_height = nonce - base_nonce;
self
}
fn penalize(mut self) -> Self {
self.penalties = self.penalties.saturating_add(1);
self
}
}
impl Eq for TransactionOrder {}
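// Editor's sketch (not in the original diff): the priority order introduced
// by the `penalties` field, in miniature. Penalties dominate, then nonce
// height; the higher-gas-price-first tie-break mirrors the rest of this impl,
// with u64 standing in for U256.
#[derive(PartialEq, Eq)]
struct OrderSketch { penalties: usize, nonce_height: u64, gas_price: u64 }
impl Ord for OrderSketch {
	fn cmp(&self, b: &Self) -> Ordering {
		if self.penalties != b.penalties {
			return self.penalties.cmp(&b.penalties);
		}
		if self.nonce_height != b.nonce_height {
			return self.nonce_height.cmp(&b.nonce_height);
		}
		b.gas_price.cmp(&self.gas_price) // higher gas price sorts first
	}
}
impl PartialOrd for OrderSketch {
	fn partial_cmp(&self, b: &Self) -> Option<Ordering> { Some(self.cmp(b)) }
}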
@ -166,6 +175,11 @@ impl PartialOrd for TransactionOrder {
impl Ord for TransactionOrder {
fn cmp(&self, b: &TransactionOrder) -> Ordering {
// First check number of penalties
if self.penalties != b.penalties {
return self.penalties.cmp(&b.penalties);
}
// Then check nonce_height
if self.nonce_height != b.nonce_height {
return self.nonce_height.cmp(&b.nonce_height);
@ -215,7 +229,48 @@ impl VerifiedTransaction {
}
fn sender(&self) -> Address {
self.transaction.sender().expect("Sender is verified in new; qed")
}
}
#[derive(Debug, Default)]
struct GasPriceQueue {
backing: BTreeMap<U256, HashSet<H256>>,
}
impl GasPriceQueue {
/// Insert an item into a BTreeMap/HashSet "multimap".
pub fn insert(&mut self, gas_price: U256, hash: H256) -> bool {
self.backing.entry(gas_price).or_insert_with(Default::default).insert(hash)
}
/// Remove an item from a BTreeMap/HashSet "multimap".
/// Returns true if the item was removed successfully.
pub fn remove(&mut self, gas_price: &U256, hash: &H256) -> bool {
if let Some(mut hashes) = self.backing.get_mut(gas_price) {
let only_one_left = hashes.len() == 1;
if !only_one_left {
// Operation may be ok: only if hash is in gas-price's Set.
return hashes.remove(hash);
}
if hash != hashes.iter().next().expect("We know there is only one element in collection, tested above; qed") {
// Operation failed: hash not the single item in gas-price's Set.
return false;
}
} else {
// Operation failed: gas-price not found in Map.
return false;
}
// Operation maybe ok: only if hash not found in gas-price Set.
self.backing.remove(gas_price).is_some()
}
}
impl Deref for GasPriceQueue {
type Target=BTreeMap<U256, HashSet<H256>>;
fn deref(&self) -> &Self::Target {
&self.backing
}
}
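// Editor's sketch (not in the original diff): the multimap invariant that
// GasPriceQueue maintains, with u64 standing in for U256/H256 so the sketch
// is self-contained. Removing the last hash at a given price must drop the
// whole bucket, exactly as `remove` above does.
fn gas_price_multimap_sketch() {
	use std::collections::{BTreeMap, HashSet};
	let mut backing: BTreeMap<u64, HashSet<u64>> = BTreeMap::new();
	backing.entry(20).or_insert_with(Default::default).insert(0xaa);
	backing.entry(20).or_insert_with(Default::default).insert(0xbb);
	backing.get_mut(&20).expect("bucket was just created").remove(&0xaa);
	assert!(backing.contains_key(&20)); // one hash left; bucket stays
	if backing.get(&20).map_or(false, |set| set.len() == 1) {
		backing.remove(&20); // last hash at this price: drop the whole bucket
	}
	assert!(!backing.contains_key(&20));
}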
@ -227,7 +282,7 @@ impl VerifiedTransaction {
struct TransactionSet {
by_priority: BTreeSet<TransactionOrder>,
by_address: Table<Address, U256, TransactionOrder>,
by_gas_price: GasPriceQueue,
limit: usize,
}
@ -245,12 +300,12 @@ impl TransactionSet {
// If transaction was replaced remove it from priority queue
if let Some(ref old_order) = by_address_replaced {
assert!(self.by_priority.remove(old_order), "hash is in `by_address`; all transactions in `by_address` must be in `by_priority`; qed");
assert!(self.by_gas_price.remove(&old_order.gas_price, &old_order.hash),
"hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed");
}
self.by_gas_price.insert(order_gas_price, order_hash);
assert_eq!(self.by_priority.len(), self.by_address.len());
assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len());
by_address_replaced
}
@ -263,6 +318,7 @@ impl TransactionSet {
if len <= self.limit {
return None;
}
let to_drop : Vec<(Address, U256)> = {
self.by_priority
.iter()
@ -290,13 +346,16 @@ impl TransactionSet {
/// Drop transaction from this set (remove from `by_priority` and `by_address`)
fn drop(&mut self, sender: &Address, nonce: &U256) -> Option<TransactionOrder> {
if let Some(tx_order) = self.by_address.remove(sender, nonce) {
assert!(self.by_gas_price.remove(&tx_order.gas_price, &tx_order.hash),
"hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed");
assert!(self.by_priority.remove(&tx_order),
"hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_priority`; qed");
assert_eq!(self.by_priority.len(), self.by_address.len());
assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len());
return Some(tx_order);
}
assert_eq!(self.by_priority.len(), self.by_address.len());
assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len());
None
}
@ -304,7 +363,7 @@ impl TransactionSet {
fn clear(&mut self) {
self.by_priority.clear();
self.by_address.clear();
self.by_gas_price.backing.clear();
}
/// Sets new limit for number of transactions in this `TransactionSet`.
@ -321,32 +380,6 @@ impl TransactionSet {
_ => U256::default(),
}
}
}
#[derive(Debug)]
@ -367,7 +400,7 @@ pub struct AccountDetails {
}
/// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) %
/// `TransactionQueue` implementation
pub struct TransactionQueue {
@ -486,8 +519,6 @@ impl TransactionQueue {
pub fn add<T>(&mut self, tx: SignedTransaction, fetch_account: &T, origin: TransactionOrigin) -> Result<TransactionImportResult, Error>
where T: Fn(&Address) -> AccountDetails {
if tx.gas_price < self.minimal_gas_price && origin != TransactionOrigin::Local {
trace!(target: "txqueue",
"Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})",
@ -573,6 +604,39 @@ impl TransactionQueue {
assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
}
/// Penalize transactions from sender of transaction with given hash.
/// I.e. it should change the priority of the transaction in the queue.
///
/// NOTE: We need to penalize all transactions from particular sender
/// to avoid breaking invariants in queue (ordered by nonces).
/// Consecutive transactions from this sender would fail otherwise (because of invalid nonce).
pub fn penalize(&mut self, transaction_hash: &H256) {
let transaction = match self.by_hash.get(transaction_hash) {
None => return,
Some(t) => t,
};
let sender = transaction.sender();
// Penalize all transactions from this sender
let nonces_from_sender = match self.current.by_address.row(&sender) {
Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
None => vec![],
};
for k in nonces_from_sender {
let order = self.current.drop(&sender, &k).unwrap();
self.current.insert(sender, k, order.penalize());
}
// Same thing for future
let nonces_from_sender = match self.future.by_address.row(&sender) {
Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
None => vec![],
};
for k in nonces_from_sender {
let order = self.future.drop(&sender, &k).unwrap();
self.future.insert(sender, k, order.penalize());
}
}
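// Editor's sketch (not in the original diff): why `penalize` bumps every
// nonce of the sender at once. If only the offending transaction were
// demoted, a later nonce could be scheduled ahead of an earlier one and fail
// with an invalid nonce. Plain integers stand in for U256 here; see also the
// should_penalize_transactions_from_sender test below.
fn penalty_keeps_nonce_order_sketch() {
	// (penalties, nonce) pairs for one sender, ordered by (penalties, nonce)
	let mut orders = vec![(0usize, 0u64), (0, 1), (0, 2)];
	for order in &mut orders {
		order.0 = order.0.saturating_add(1); // penalize the whole sender
	}
	orders.sort();
	assert!(orders.windows(2).all(|w| w[0].1 < w[1].1)); // nonce order intact
}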
/// Removes invalid transaction identified by hash from queue.
/// Assumption is that this transaction nonce is not related to client nonce,
/// so transactions left in queue are processed according to client nonce.
@ -588,7 +652,7 @@ impl TransactionQueue {
return;
}
let transaction = transaction.expect("None is tested in early-exit condition above; qed");
let sender = transaction.sender();
let nonce = transaction.nonce();
let current_nonce = fetch_account(&sender).nonce;
@ -623,7 +687,7 @@ impl TransactionQueue {
None => vec![],
};
for k in all_nonces_from_sender {
let order = self.future.drop(sender, &k).expect("iterating over a collection that has been retrieved above; qed");
if k >= current_nonce {
self.future.insert(*sender, k, order.update_height(k, current_nonce));
} else {
@ -644,7 +708,8 @@ impl TransactionQueue {
for k in all_nonces_from_sender {
// Goes to future or is removed
let order = self.current.drop(sender, &k).expect("iterating over a collection that has been retrieved above; qed");
if k >= current_nonce {
self.future.insert(*sender, k, order.update_height(k, current_nonce));
} else {
@ -704,10 +769,11 @@ impl TransactionQueue {
if let None = by_nonce {
return;
}
let mut by_nonce = by_nonce.expect("None is tested in early-exit condition above; qed");
while let Some(order) = by_nonce.remove(&current_nonce) {
// remove also from priority and gas_price
self.future.by_priority.remove(&order);
self.future.by_gas_price.remove(&order.gas_price, &order.hash);
// Put to current
let order = order.update_height(current_nonce, first_nonce);
self.current.insert(address, current_nonce, order);
@ -742,6 +808,7 @@ impl TransactionQueue {
let address = tx.sender();
let nonce = tx.nonce();
let hash = tx.hash();
let next_nonce = self.last_nonces
.get(&address)
@ -763,6 +830,9 @@ impl TransactionQueue {
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash)));
// Return an error if this transaction is not imported because of limit.
try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
debug!(target: "txqueue", "Importing transaction to future: {:?}", hash);
debug!(target: "txqueue", "status: {:?}", self.status());
return Ok(TransactionImportResult::Future);
}
try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
@ -789,7 +859,8 @@ impl TransactionQueue {
// Trigger error if the transaction we are importing was removed.
try!(check_if_removed(&address, &nonce, removed));
debug!(target: "txqueue", "Imported transaction to current: {:?}", hash);
debug!(target: "txqueue", "status: {:?}", self.status());
Ok(TransactionImportResult::Current)
}
@ -923,11 +994,22 @@ mod test {
(tx1.sign(secret), tx2.sign(secret))
}
/// Returns two consecutive transactions, both with increased gas price
fn new_tx_pair_with_gas_price_increment(gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) {
let gas = default_gas_price() + gas_price_increment;
let tx1 = new_unsigned_tx(default_nonce(), gas);
let tx2 = new_unsigned_tx(default_nonce() + 1.into(), gas);
let keypair = Random.generate().unwrap();
let secret = &keypair.secret();
(tx1.sign(secret), tx2.sign(secret))
}
fn new_tx_pair_default(nonce_increment: U256, gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) {
new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment)
}
/// Returns two transactions with identical (sender, nonce) but different gas price/hash.
fn new_similar_tx_pair() -> (SignedTransaction, SignedTransaction) {
new_tx_pair_default(0.into(), 1.into())
}
@ -1310,6 +1392,39 @@ mod test {
assert_eq!(top.len(), 2);
}
#[test]
fn should_penalize_transactions_from_sender() {
// given
let mut txq = TransactionQueue::new();
// txa, txb - slightly bigger gas price to have consistent ordering
let (txa, txb) = new_tx_pair_default(1.into(), 0.into());
let (tx1, tx2) = new_tx_pair_with_gas_price_increment(3.into());
// insert everything
txq.add(txa.clone(), &default_account_details, TransactionOrigin::External).unwrap();
txq.add(txb.clone(), &default_account_details, TransactionOrigin::External).unwrap();
txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap();
txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap();
let top = txq.top_transactions();
assert_eq!(top[0], tx1);
assert_eq!(top[1], txa);
assert_eq!(top[2], tx2);
assert_eq!(top[3], txb);
assert_eq!(top.len(), 4);
// when
txq.penalize(&tx1.hash());
// then
let top = txq.top_transactions();
assert_eq!(top[0], txa);
assert_eq!(top[1], txb);
assert_eq!(top[2], tx1);
assert_eq!(top[3], tx2);
assert_eq!(top.len(), 4);
}
#[test]
fn should_return_pending_hashes() {
// given
@ -1395,6 +1510,9 @@ mod test {
let stats = txq.status();
assert_eq!(stats.pending, 3);
assert_eq!(stats.future, 0);
assert_eq!(txq.future.by_priority.len(), 0);
assert_eq!(txq.future.by_address.len(), 0);
assert_eq!(txq.future.by_gas_price.len(), 0);
}
#[test]
View File
@ -94,7 +94,6 @@ impl ClientService {
pruning: pruning,
channel: io_service.channel(),
snapshot_root: snapshot_path.into(),
db_restore: client.clone(),
};
let snapshot = Arc::new(try!(SnapshotService::new(snapshot_params)));
@ -187,7 +186,7 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); }
ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); }
ClientIoMessage::BeginRestoration(ref manifest) => {
if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) {
warn!("Failed to initialize snapshot restoration: {}", e);
}
}
View File
@ -92,7 +92,8 @@ impl Account {
let mut pairs = Vec::new();
for item in try!(db.iter()) {
let (k, v) = try!(item);
pairs.push((k, v));
}
View File
@ -21,10 +21,10 @@ use header::Header;
use views::BlockView;
use rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View};
use util::{Bytes, Hashable, H256};
use util::triehash::ordered_trie_root;
const HEADER_FIELDS: usize = 8;
const BLOCK_FIELDS: usize = 2;
pub struct AbridgedBlock {
@ -61,8 +61,6 @@ impl AbridgedBlock {
stream
.append(&header.author())
.append(&header.state_root())
.append(&header.log_bloom())
.append(&header.difficulty())
.append(&header.gas_limit())
@ -79,33 +77,35 @@ impl AbridgedBlock {
}
AbridgedBlock {
rlp: stream.out(),
}
}
/// Flesh out an abridged block view with the provided parent hash and block number.
///
/// Will fail if it contains invalid rlp.
pub fn to_block(&self, parent_hash: H256, number: u64, receipts_root: H256) -> Result<Block, DecoderError> {
let rlp = UntrustedRlp::new(&self.rlp);
let mut header: Header = Default::default();
header.set_parent_hash(parent_hash);
header.set_author(try!(rlp.val_at(0)));
header.set_state_root(try!(rlp.val_at(1)));
header.set_log_bloom(try!(rlp.val_at(2)));
header.set_difficulty(try!(rlp.val_at(3)));
header.set_number(number);
header.set_gas_limit(try!(rlp.val_at(4)));
header.set_gas_used(try!(rlp.val_at(5)));
header.set_timestamp(try!(rlp.val_at(6)));
header.set_extra_data(try!(rlp.val_at(7)));
let transactions = try!(rlp.val_at(8));
let uncles: Vec<Header> = try!(rlp.val_at(9));
header.set_transactions_root(ordered_trie_root(
try!(rlp.at(8)).iter().map(|r| r.as_raw().to_owned())
));
header.set_receipts_root(receipts_root);
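// Editor's note (not in the original diff): the abridged form now omits
// transactions_root and receipts_root entirely (HEADER_FIELDS 10 -> 8);
// transactions_root is recomputed above from the transaction RLPs via
// ordered_trie_root, while receipts_root cannot be recovered from the block
// alone and must be supplied by the caller.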
let mut uncles_rlp = RlpStream::new();
uncles_rlp.append(&uncles);
@ -143,20 +143,22 @@ mod tests {
#[test]
fn empty_block_abridging() {
let b = Block::default();
let receipts_root = b.header.receipts_root().clone();
let encoded = encode_block(&b);
let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded));
assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b);
}
#[test]
#[should_panic]
fn wrong_number() {
let b = Block::default();
let receipts_root = b.header.receipts_root().clone();
let encoded = encode_block(&b);
let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded));
assert_eq!(abridged.to_block(H256::new(), 2, receipts_root).unwrap(), b);
}
#[test]
@ -184,9 +186,14 @@ mod tests {
b.transactions.push(t1);
b.transactions.push(t2);
let receipts_root = b.header.receipts_root().clone();
b.header.set_transactions_root(::util::triehash::ordered_trie_root(
b.transactions.iter().map(::rlp::encode).map(|out| out.to_vec())
));
let encoded = encode_block(&b);
let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded[..]));
assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b);
}
}
View File
@ -15,6 +15,9 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Snapshot creation, restoration, and network service.
//!
//! Documentation of the format can be found at
//! https://github.com/ethcore/parity/wiki/%22PV64%22-Snapshot-Format
use std::collections::{HashMap, HashSet, VecDeque};
use std::sync::Arc;
@ -34,7 +37,7 @@ use util::journaldb::{self, Algorithm, JournalDB};
use util::kvdb::Database;
use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut};
use util::sha3::SHA3_NULL_RLP;
use rlp::{RlpStream, Stream, UntrustedRlp, View};
use self::account::Account;
use self::block::AbridgedBlock;
@ -358,15 +361,15 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
let mut used_code = HashSet::new();
// account_key here is the address' hash.
for item in try!(account_trie.iter()) {
let (account_key, account_data) = try!(item);
let account = Account::from_thin_rlp(account_data);
let account_key_hash = H256::from_slice(&account_key);
let account_db = AccountDB::from_hash(db, account_key_hash);
let fat_rlp = try!(account.to_fat_rlp(&account_db, &mut used_code));
try!(chunker.push(account_key, fat_rlp));
}
if chunker.cur_size != 0 {
@ -507,8 +510,7 @@ fn rebuild_accounts(
let account_rlp = UntrustedRlp::new(account_pair);
let hash: H256 = try!(account_rlp.val_at(0));
let fat_rlp = try!(account_rlp.at(1));
let thin_rlp = {
let mut acct_db = AccountDBMut::from_hash(db, hash);
@ -569,6 +571,7 @@ impl BlockRebuilder {
pub fn feed(&mut self, chunk: &[u8], engine: &Engine) -> Result<u64, ::error::Error> {
use basic_types::Seal::With;
use util::U256;
use util::triehash::ordered_trie_root;
let rlp = UntrustedRlp::new(chunk);
let item_count = rlp.item_count();
@ -585,7 +588,11 @@ impl BlockRebuilder {
let abridged_rlp = try!(pair.at(0)).as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = try!(pair.val_at(1));
let receipts_root = ordered_trie_root(
try!(pair.at(1)).iter().map(|r| r.as_raw().to_owned())
);
let block = try!(abridged_block.to_block(parent_hash, cur_number, receipts_root));
let block_bytes = block.rlp_bytes(With);
if self.rng.gen::<f32>() <= POW_VERIFY_RATE {
View File
@ -27,7 +27,7 @@ use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, Sna
use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};
use blockchain::BlockChain;
use client::{BlockChainClient, Client};
use engines::Engine;
use error::Error;
use ids::BlockID;
@ -35,7 +35,7 @@ use service::ClientIoMessage;
use io::IoChannel;
use util::{Bytes, H256, Mutex, RwLock, RwLockReadGuard, UtilError};
use util::journaldb::Algorithm;
use util::kvdb::{Database, DatabaseConfig};
use util::snappy;
@ -70,7 +70,7 @@ struct Restoration {
block_chunks_left: HashSet<H256>,
state: StateRebuilder,
blocks: BlockRebuilder,
writer: Option<LooseWriter>,
snappy_buffer: Bytes,
final_state_root: H256,
guard: Guard,
@ -80,8 +80,8 @@ struct RestorationParams<'a> {
manifest: ManifestData, // manifest to base restoration on.
pruning: Algorithm, // pruning algorithm for the database.
db_path: PathBuf, // database path
db_config: &'a DatabaseConfig, // configuration for the database.
writer: Option<LooseWriter>, // writer for recovered snapshot.
genesis: &'a [u8], // genesis block of the chain.
guard: Guard, // guard for the restoration directory.
}
@ -120,7 +120,10 @@ impl Restoration {
let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));
try!(self.state.feed(&self.snappy_buffer[..len]));
if let Some(ref mut writer) = self.writer.as_mut() {
try!(writer.write_state_chunk(hash, chunk));
}
}
Ok(())
@ -132,7 +135,9 @@ impl Restoration {
let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));
try!(self.blocks.feed(&self.snappy_buffer[..len], engine));
if let Some(ref mut writer) = self.writer.as_mut() {
try!(writer.write_block_chunk(hash, chunk));
}
}
Ok(())
@ -157,7 +162,9 @@ impl Restoration {
// connect out-of-order chunks.
self.blocks.glue_chunks();
if let Some(writer) = self.writer {
try!(writer.finish(self.manifest));
}
self.guard.disarm();
Ok(())
@ -187,9 +194,6 @@ pub struct ServiceParams {
/// The directory to put snapshots in.
/// Usually "<chain hash>/snapshot"
pub snapshot_root: PathBuf,
/// A handle for database restoration.
pub db_restore: Arc<DatabaseRestore>,
}
@ -198,7 +202,6 @@ pub struct ServiceParams {
/// This controls taking snapshots and restoring from them.
pub struct Service {
restoration: Mutex<Option<Restoration>>,
snapshot_root: PathBuf,
db_config: DatabaseConfig,
io_channel: Channel,
@ -219,7 +222,6 @@ impl Service {
pub fn new(params: ServiceParams) -> Result<Self, Error> {
let mut service = Service {
restoration: Mutex::new(None),
snapshot_root: params.snapshot_root,
db_config: params.db_config,
io_channel: params.channel,
@ -301,11 +303,15 @@ impl Service {
fn replace_client_db(&self) -> Result<(), Error> {
let our_db = self.restoration_db();
try!(self.db_restore.restore_db(&*our_db.to_string_lossy()));
Ok(())
}
/// Get a reference to the snapshot reader.
pub fn reader(&self) -> RwLockReadGuard<Option<LooseReader>> {
self.reader.read()
}
/// Tick the snapshot service. This will log any active snapshot
/// being taken.
pub fn tick(&self) {
@ -339,7 +345,17 @@ impl Service {
let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress);
self.taking_snapshot.store(false, Ordering::SeqCst);
if let Err(e) = res {
if client.chain_info().best_block_number >= num + ::client::HISTORY {
// "Cancelled" is mincing words a bit -- what really happened
// is that the state we were snapshotting got pruned out
// before we could finish.
info!("Cancelled prematurely-started periodic snapshot.");
return Ok(())
} else {
return Err(e);
}
}
info!("Finished taking snapshot at #{}", num); info!("Finished taking snapshot at #{}", num);
@ -348,6 +364,10 @@ impl Service {
// destroy the old snapshot reader.
*reader = None;
if snapshot_dir.exists() {
try!(fs::remove_dir_all(&snapshot_dir));
}
try!(fs::rename(temp_dir, &snapshot_dir));
*reader = Some(try!(LooseReader::new(snapshot_dir)));
@ -357,11 +377,15 @@ impl Service {
}
/// Initialize the restoration synchronously.
/// The recover flag indicates whether to recover the restored snapshot.
pub fn init_restore(&self, manifest: ManifestData, recover: bool) -> Result<(), Error> {
let rest_dir = self.restoration_dir(); let rest_dir = self.restoration_dir();
let mut res = self.restoration.lock(); let mut res = self.restoration.lock();
self.state_chunks.store(0, Ordering::SeqCst);
self.block_chunks.store(0, Ordering::SeqCst);
// tear down existing restoration. // tear down existing restoration.
*res = None; *res = None;
@ -376,7 +400,10 @@ impl Service {
try!(fs::create_dir_all(&rest_dir)); try!(fs::create_dir_all(&rest_dir));
// make new restoration. // make new restoration.
-let writer = try!(LooseWriter::new(self.temp_recovery_dir()));
+let writer = match recover {
+true => Some(try!(LooseWriter::new(self.temp_recovery_dir()))),
+false => None
+};
let params = RestorationParams { let params = RestorationParams {
manifest: manifest, manifest: manifest,
@ -391,8 +418,8 @@ impl Service {
*res = Some(try!(Restoration::new(params))); *res = Some(try!(Restoration::new(params)));
*self.status.lock() = RestorationStatus::Ongoing { *self.status.lock() = RestorationStatus::Ongoing {
-state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32,
-block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32,
+state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32,
+block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32,
}; };
Ok(()) Ok(())
} }
@ -403,35 +430,30 @@ impl Service {
fn finalize_restoration(&self, rest: &mut Option<Restoration>) -> Result<(), Error> { fn finalize_restoration(&self, rest: &mut Option<Restoration>) -> Result<(), Error> {
trace!(target: "snapshot", "finalizing restoration"); trace!(target: "snapshot", "finalizing restoration");
-self.state_chunks.store(0, Ordering::SeqCst);
-self.block_chunks.store(0, Ordering::SeqCst);
+let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some());
// destroy the restoration before replacing databases and snapshot. // destroy the restoration before replacing databases and snapshot.
try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(()))); try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(())));
try!(self.replace_client_db()); try!(self.replace_client_db());
if recover {
let mut reader = self.reader.write(); let mut reader = self.reader.write();
*reader = None; // destroy the old reader if it existed. *reader = None; // destroy the old reader if it existed.
let snapshot_dir = self.snapshot_dir(); let snapshot_dir = self.snapshot_dir();
if snapshot_dir.exists() {
trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy());
-if let Err(e) = fs::remove_dir_all(&snapshot_dir) {
-match e.kind() {
-ErrorKind::NotFound => {}
-_ => return Err(e.into()),
-}
-}
-try!(fs::create_dir(&snapshot_dir));
+try!(fs::remove_dir_all(&snapshot_dir));
+}
trace!(target: "snapshot", "copying restored snapshot files over"); trace!(target: "snapshot", "copying restored snapshot files over");
try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir));
let _ = fs::remove_dir_all(self.restoration_dir());
*reader = Some(try!(LooseReader::new(snapshot_dir))); *reader = Some(try!(LooseReader::new(snapshot_dir)));
}
let _ = fs::remove_dir_all(self.restoration_dir());
*self.status.lock() = RestorationStatus::Inactive; *self.status.lock() = RestorationStatus::Inactive;
Ok(()) Ok(())
@ -512,7 +534,13 @@ impl SnapshotService for Service {
} }
fn status(&self) -> RestorationStatus { fn status(&self) -> RestorationStatus {
-*self.status.lock()
+let mut cur_status = self.status.lock();
+if let RestorationStatus::Ongoing { ref mut state_chunks_done, ref mut block_chunks_done } = *cur_status {
+*state_chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32;
+*block_chunks_done = self.block_chunks.load(Ordering::SeqCst) as u32;
+}
+cur_status.clone()
} }
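status() now refreshes the Ongoing counters from the shared atomics on every call, which is why the loads (and the matching resets in init_restore) moved to Ordering::SeqCst. A minimal sketch of the counter half of that contract, using only std:

use std::sync::atomic::{AtomicUsize, Ordering};

struct Progress {
	state_chunks: AtomicUsize,
	block_chunks: AtomicUsize,
}

impl Progress {
	fn reset(&self) {
		self.state_chunks.store(0, Ordering::SeqCst);
		self.block_chunks.store(0, Ordering::SeqCst);
	}
	// A status reader observes any reset or increment sequenced before it.
	fn counts(&self) -> (usize, usize) {
		(self.state_chunks.load(Ordering::SeqCst),
		 self.block_chunks.load(Ordering::SeqCst))
	}
}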
fn begin_restore(&self, manifest: ManifestData) { fn begin_restore(&self, manifest: ManifestData) {
@ -523,12 +551,6 @@ impl SnapshotService for Service {
fn abort_restore(&self) { fn abort_restore(&self) {
*self.restoration.lock() = None; *self.restoration.lock() = None;
*self.status.lock() = RestorationStatus::Inactive; *self.status.lock() = RestorationStatus::Inactive;
if let Err(e) = fs::remove_dir_all(&self.restoration_dir()) {
match e.kind() {
ErrorKind::NotFound => {},
_ => warn!("encountered error {} while deleting snapshot restoration dir.", e),
}
}
} }
fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { fn restore_state_chunk(&self, hash: H256, chunk: Bytes) {
@ -585,7 +607,6 @@ mod tests {
pruning: Algorithm::Archive, pruning: Algorithm::Archive,
channel: service.channel(), channel: service.channel(),
snapshot_root: dir, snapshot_root: dir,
client_db: client_db,
db_restore: Arc::new(NoopDBRestore), db_restore: Arc::new(NoopDBRestore),
}; };
@ -45,14 +45,16 @@ impl StateProducer {
} }
} }
#[cfg_attr(feature="dev", allow(let_and_return))]
/// Tick the state producer. This alters the state, writing new data into /// Tick the state producer. This alters the state, writing new data into
/// the database. /// the database.
pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut HashDB) { pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut HashDB) {
// modify existing accounts. // modify existing accounts.
let mut accounts_to_modify: Vec<_> = { let mut accounts_to_modify: Vec<_> = {
let trie = TrieDB::new(&*db, &self.state_root).unwrap(); let trie = TrieDB::new(&*db, &self.state_root).unwrap();
-let temp = trie.iter() // binding required due to complicated lifetime stuff
+let temp = trie.iter().unwrap() // binding required due to complicated lifetime stuff
.filter(|_| rng.gen::<f32>() < ACCOUNT_CHURN) .filter(|_| rng.gen::<f32>() < ACCOUNT_CHURN)
.map(Result::unwrap)
.map(|(k, v)| (H256::from_slice(&k), v.to_owned())) .map(|(k, v)| (H256::from_slice(&k), v.to_owned()))
.collect(); .collect();
@ -18,6 +18,7 @@
mod blocks; mod blocks;
mod state; mod state;
mod service;
pub mod helpers; pub mod helpers;
@ -0,0 +1,143 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tests for the snapshot service.
use std::sync::Arc;
use client::{BlockChainClient, Client};
use ids::BlockID;
use snapshot::service::{Service, ServiceParams};
use snapshot::{self, ManifestData, SnapshotService};
use spec::Spec;
use tests::helpers::generate_dummy_client_with_spec_and_data;
use devtools::RandomTempPath;
use io::IoChannel;
use util::kvdb::DatabaseConfig;
struct NoopDBRestore;
impl snapshot::DatabaseRestore for NoopDBRestore {
fn restore_db(&self, _new_db: &str) -> Result<(), ::error::Error> {
Ok(())
}
}
#[test]
fn restored_is_equivalent() {
const NUM_BLOCKS: u32 = 400;
const TX_PER: usize = 5;
let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices);
let path = RandomTempPath::create_dir();
let mut path = path.as_path().clone();
let mut client_db = path.clone();
client_db.push("client_db");
path.push("snapshot");
let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let spec = Spec::new_null();
let client2 = Client::new(
Default::default(),
&spec,
&client_db,
Arc::new(::miner::Miner::with_spec(&spec)),
IoChannel::disconnected(),
&db_config,
).unwrap();
let service_params = ServiceParams {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
db_config: db_config,
pruning: ::util::journaldb::Algorithm::Archive,
channel: IoChannel::disconnected(),
snapshot_root: path,
db_restore: client2.clone(),
};
let service = Service::new(service_params).unwrap();
service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap();
let manifest = service.manifest().unwrap();
service.init_restore(manifest.clone(), true).unwrap();
assert!(service.init_restore(manifest.clone(), true).is_ok());
for hash in manifest.state_hashes {
let chunk = service.chunk(hash).unwrap();
service.feed_state_chunk(hash, &chunk);
}
for hash in manifest.block_hashes {
let chunk = service.chunk(hash).unwrap();
service.feed_block_chunk(hash, &chunk);
}
assert_eq!(service.status(), ::snapshot::RestorationStatus::Inactive);
for x in 0..NUM_BLOCKS {
let block1 = client.block(BlockID::Number(x as u64)).unwrap();
let block2 = client2.block(BlockID::Number(x as u64)).unwrap();
assert_eq!(block1, block2);
}
}
#[test]
fn guards_delete_folders() {
let spec = Spec::new_null();
let path = RandomTempPath::create_dir();
let mut path = path.as_path().clone();
let service_params = ServiceParams {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS),
pruning: ::util::journaldb::Algorithm::Archive,
channel: IoChannel::disconnected(),
snapshot_root: path.clone(),
db_restore: Arc::new(NoopDBRestore),
};
let service = Service::new(service_params).unwrap();
path.push("restoration");
let manifest = ManifestData {
state_hashes: vec![],
block_hashes: vec![],
block_number: 0,
block_hash: Default::default(),
state_root: Default::default(),
};
service.init_restore(manifest.clone(), true).unwrap();
assert!(path.exists());
service.abort_restore();
assert!(!path.exists());
service.init_restore(manifest.clone(), true).unwrap();
assert!(path.exists());
drop(service);
assert!(!path.exists());
}
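guards_delete_folders observes only the restoration directory itself: abort_restore and dropping the service must both leave no folder behind. A standalone sketch of the guard idiom the service relies on (hypothetical DirGuard, not a type from the codebase):

use std::fs;
use std::io;
use std::path::PathBuf;

// Hypothetical guard: creates a directory and removes it again on drop.
struct DirGuard(PathBuf);

impl DirGuard {
	fn new(path: PathBuf) -> io::Result<DirGuard> {
		try!(fs::create_dir_all(&path));
		Ok(DirGuard(path))
	}
}

impl Drop for DirGuard {
	fn drop(&mut self) {
		let _ = fs::remove_dir_all(&self.0);
	}
}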
@ -184,7 +184,7 @@ impl Spec {
let r = Rlp::new(&seal); let r = Rlp::new(&seal);
(0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect() (0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect()
}); });
-return header;
+header
} }
/// Compose the genesis block for this chain. /// Compose the genesis block for this chain.
@ -261,6 +261,11 @@ impl Spec {
pub fn new_null() -> Self { pub fn new_null() -> Self {
Spec::load(include_bytes!("../../res/null.json") as &[u8]).expect("null.json is invalid") Spec::load(include_bytes!("../../res/null.json") as &[u8]).expect("null.json is invalid")
} }
/// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring work).
pub fn new_test_instant() -> Self {
Spec::load(include_bytes!("../../res/instant_seal.json") as &[u8]).expect("instant_seal.json is invalid")
}
} }
#[cfg(test)] #[cfg(test)]
@ -274,7 +279,7 @@ mod tests {
// https://github.com/ethcore/parity/issues/1840 // https://github.com/ethcore/parity/issues/1840
#[test] #[test]
fn test_load_empty() { fn test_load_empty() {
-assert!(Spec::load(&vec![] as &[u8]).is_err());
+assert!(Spec::load(&[] as &[u8]).is_err());
} }
#[test] #[test]
@ -191,12 +191,18 @@ impl State {
})) }))
} }
-/// Mutate storage of account `a` so that it is `value` for `key`.
+/// Get the code of account `a`.
pub fn code(&self, a: &Address) -> Option<Bytes> { pub fn code(&self, a: &Address) -> Option<Bytes> {
self.ensure_cached(a, true, self.ensure_cached(a, true,
|a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec()))) |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec())))
} }
/// Get the code size of account `a`.
pub fn code_size(&self, a: &Address) -> Option<usize> {
self.ensure_cached(a, true,
|a| a.as_ref().map_or(None, |a| a.code().map(|x| x.len())))
}
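code_size mirrors code above: the as_ref().map_or(None, ...) chain is Option::and_then written out. A standalone demonstration of the equivalence on plain std types:

// `opt.map_or(None, f)` equals `opt.and_then(f)` for f returning an Option.
fn code_len(code: &Option<Vec<u8>>) -> Option<usize> {
	code.as_ref().map_or(None, |c| Some(c.len()))
}

fn main() {
	let code = Some(vec![0x60u8, 0x60, 0x52]);
	assert_eq!(code_len(&code), code.as_ref().and_then(|c| Some(c.len())));
	assert_eq!(code_len(&None), None);
}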
/// Add `incr` to the balance of account `a`. /// Add `incr` to the balance of account `a`.
pub fn add_balance(&mut self, a: &Address, incr: &U256) { pub fn add_balance(&mut self, a: &Address, incr: &U256) {
trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)); trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a));
@ -420,10 +426,27 @@ impl fmt::Debug for State {
impl Clone for State { impl Clone for State {
fn clone(&self) -> State { fn clone(&self) -> State {
let cache = {
let mut cache = HashMap::new();
for (key, val) in self.cache.borrow().iter() {
let key = key.clone();
match *val {
Some(ref acc) if acc.is_dirty() => {
cache.insert(key, Some(acc.clone()));
},
None => {
cache.insert(key, None);
},
_ => {},
}
}
cache
};
State { State {
db: self.db.boxed_clone(), db: self.db.boxed_clone(),
root: self.root.clone(), root: self.root.clone(),
-cache: RefCell::new(self.cache.borrow().clone()),
+cache: RefCell::new(cache),
snapshots: RefCell::new(self.snapshots.borrow().clone()), snapshots: RefCell::new(self.snapshots.borrow().clone()),
account_start_nonce: self.account_start_nonce.clone(), account_start_nonce: self.account_start_nonce.clone(),
factories: self.factories.clone(), factories: self.factories.clone(),
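The rewritten Clone keeps only dirty entries and explicit deletions (None), so a cloned State starts from a minimal cache rather than dragging the whole read cache along. A standalone sketch of that filtering with a simplified entry type:

use std::collections::HashMap;

#[derive(Clone)]
struct Entry { dirty: bool }

// Keep dirty entries and tombstones; drop clean cached reads.
fn clone_dirty(cache: &HashMap<String, Option<Entry>>) -> HashMap<String, Option<Entry>> {
	let mut out = HashMap::new();
	for (key, val) in cache.iter() {
		match *val {
			Some(ref e) if e.dirty => { out.insert(key.clone(), Some(e.clone())); },
			None => { out.insert(key.clone(), None); },
			_ => {},
		}
	}
	out
}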
@ -1314,13 +1337,13 @@ fn storage_at_from_database() {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let (root, db) = { let (root, db) = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state_in(temp.as_path());
-state.set_storage(&a, H256::from(&U256::from(01u64)), H256::from(&U256::from(69u64)));
+state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64)));
state.commit().unwrap(); state.commit().unwrap();
state.drop() state.drop()
}; };
let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
-assert_eq!(s.storage_at(&a, &H256::from(&U256::from(01u64))), H256::from(&U256::from(69u64)));
+assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))), H256::from(&U256::from(69u64)));
} }
#[test] #[test]
@ -19,6 +19,7 @@ use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, Blo
use ethereum; use ethereum;
use block::IsBlock; use block::IsBlock;
use tests::helpers::*; use tests::helpers::*;
use types::filter::Filter;
use common::*; use common::*;
use devtools::*; use devtools::*;
use miner::Miner; use miner::Miner;
@ -131,6 +132,36 @@ fn returns_chain_info() {
assert_eq!(info.best_block_hash, block.header().hash()); assert_eq!(info.best_block_hash, block.header().hash());
} }
#[test]
fn returns_logs() {
let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let logs = client.logs(Filter {
from_block: BlockID::Earliest,
to_block: BlockID::Latest,
address: None,
topics: vec![],
limit: None,
});
assert_eq!(logs.len(), 0);
}
#[test]
fn returns_logs_with_limit() {
let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let logs = client.logs(Filter {
from_block: BlockID::Earliest,
to_block: BlockID::Latest,
address: None,
topics: vec![],
limit: Some(2),
});
assert_eq!(logs.len(), 0);
}
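Both tests exercise the new limit field on Filter; per the documentation added to the types further down, a limit of n keeps only the last n matching logs. A standalone sketch of that truncation rule:

// Keep only the *last* `limit` entries, per the Filter::limit docs.
fn apply_limit<T>(mut logs: Vec<T>, limit: Option<usize>) -> Vec<T> {
	if let Some(n) = limit {
		if logs.len() > n {
			logs = logs.split_off(logs.len() - n);
		}
	}
	logs
}

fn main() {
	assert_eq!(apply_limit(vec![1, 2, 3, 4, 5], Some(2)), vec![4, 5]);
	assert_eq!(apply_limit(vec![1, 2], None), vec![1, 2]);
}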
#[test] #[test]
fn returns_block_body() { fn returns_block_body() {
let dummy_block = get_good_dummy_block(); let dummy_block = get_good_dummy_block();
@ -56,7 +56,7 @@ fn can_handshake() {
let stop_guard = StopGuard::new(); let stop_guard = StopGuard::new();
let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc";
run_test_worker(scope, stop_guard.share(), socket_path); run_test_worker(scope, stop_guard.share(), socket_path);
-let remote_client = nanoipc::init_client::<RemoteClient<_>>(socket_path).unwrap();
+let remote_client = nanoipc::generic_client::<RemoteClient<_>>(socket_path).unwrap();
assert!(remote_client.handshake().is_ok()); assert!(remote_client.handshake().is_ok());
}) })
@ -68,7 +68,7 @@ fn can_query_block() {
let stop_guard = StopGuard::new(); let stop_guard = StopGuard::new();
let socket_path = "ipc:///tmp/parity-client-rpc-20.ipc"; let socket_path = "ipc:///tmp/parity-client-rpc-20.ipc";
run_test_worker(scope, stop_guard.share(), socket_path); run_test_worker(scope, stop_guard.share(), socket_path);
-let remote_client = nanoipc::init_client::<RemoteClient<_>>(socket_path).unwrap();
+let remote_client = nanoipc::generic_client::<RemoteClient<_>>(socket_path).unwrap();
let non_existant_block = remote_client.block_header(BlockID::Number(999)); let non_existant_block = remote_client.block_header(BlockID::Number(999));
@ -15,57 +15,14 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Traces config. //! Traces config.
use std::str::FromStr;
use bloomchain::Config as BloomConfig; use bloomchain::Config as BloomConfig;
use trace::Error;
/// 3-value enum.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Switch {
/// True.
On,
/// False.
Off,
/// Auto.
Auto,
}
impl Default for Switch {
fn default() -> Self {
Switch::Auto
}
}
impl FromStr for Switch {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"on" => Ok(Switch::On),
"off" => Ok(Switch::Off),
"auto" => Ok(Switch::Auto),
other => Err(format!("Invalid switch value: {}", other))
}
}
}
impl Switch {
/// Tries to turn old switch to new value.
pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
match (*self, to) {
(Switch::On, Switch::On) | (Switch::On, Switch::Auto) | (Switch::Auto, Switch::On) => Ok(true),
(Switch::Off, Switch::On) => Err(Error::ResyncRequired),
_ => Ok(false),
}
}
}
/// Traces config. /// Traces config.
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct Config { pub struct Config {
/// Indicates if tracing should be enabled or not. /// Indicates if tracing should be enabled or not.
/// If it's None, it will be automatically configured. /// If it's None, it will be automatically configured.
-pub enabled: Switch,
+pub enabled: bool,
/// Traces blooms configuration. /// Traces blooms configuration.
pub blooms: BloomConfig, pub blooms: BloomConfig,
/// Preferred cache-size.
@ -77,7 +34,7 @@ pub struct Config {
impl Default for Config { impl Default for Config {
fn default() -> Self { fn default() -> Self {
Config { Config {
-enabled: Switch::default(),
+enabled: false,
blooms: BloomConfig { blooms: BloomConfig {
levels: 3, levels: 3,
elements_per_index: 16, elements_per_index: 16,
@ -87,20 +44,3 @@ impl Default for Config {
} }
} }
} }
#[cfg(test)]
mod tests {
use super::Switch;
#[test]
fn test_switch_parsing() {
assert_eq!(Switch::On, "on".parse().unwrap());
assert_eq!(Switch::Off, "off".parse().unwrap());
assert_eq!(Switch::Auto, "auto".parse().unwrap());
}
#[test]
fn test_switch_default() {
assert_eq!(Switch::default(), Switch::Auto);
}
}
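Switch and its On/Off/Auto resolution do not disappear: they resurface as params::{Switch, tracing_switch_to_bool} in the CLI changes further down, resolved against the persisted user defaults instead of a marker key in tracesdb. A rough sketch of plausible semantics, assuming the same rules as the removed turn_to (the real function lives in parity/params.rs and consults UserDefaults):

#[derive(Clone, Copy, PartialEq)]
enum Switch { On, Off, Auto }

// Assumed behaviour: On after a previously-disabled run needs a resync,
// Auto keeps whatever was persisted last time.
fn tracing_switch_to_bool(requested: Switch, was_enabled: bool) -> Result<bool, String> {
	match (requested, was_enabled) {
		(Switch::On, false) => Err("tracing resync required".into()),
		(Switch::On, true) => Ok(true),
		(Switch::Off, _) => Ok(false),
		(Switch::Auto, prev) => Ok(prev),
	}
}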
@ -22,7 +22,7 @@ use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf}; use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf};
use header::BlockNumber; use header::BlockNumber;
-use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
+use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras};
use db::{self, Key, Writable, Readable, CacheUpdatePolicy}; use db::{self, Key, Writable, Readable, CacheUpdatePolicy};
use blooms; use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
@ -126,38 +126,20 @@ impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
impl<T> TraceDB<T> where T: DatabaseExtras { impl<T> TraceDB<T> where T: DatabaseExtras {
/// Creates new instance of `TraceDB`. /// Creates new instance of `TraceDB`.
-pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Result<Self, Error> {
+pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Self {
// check if in previously tracing was enabled
let old_tracing = match tracesdb.get(db::COL_TRACE, b"enabled").unwrap() {
Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
Some(_) => { panic!("tracesdb is corrupted") },
None => Switch::Auto,
};
let enabled = try!(old_tracing.turn_to(config.enabled));
let encoded_tracing = match enabled {
true => [0x1],
false => [0x0]
};
let mut batch = DBTransaction::new(&tracesdb); let mut batch = DBTransaction::new(&tracesdb);
batch.put(db::COL_TRACE, b"enabled", &encoded_tracing);
batch.put(db::COL_TRACE, b"version", TRACE_DB_VER); batch.put(db::COL_TRACE, b"version", TRACE_DB_VER);
tracesdb.write(batch).unwrap(); tracesdb.write(batch).unwrap();
-let db = TraceDB {
+TraceDB {
traces: RwLock::new(HashMap::new()), traces: RwLock::new(HashMap::new()),
blooms: RwLock::new(HashMap::new()), blooms: RwLock::new(HashMap::new()),
cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)), cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
tracesdb: tracesdb, tracesdb: tracesdb,
bloom_config: config.blooms, bloom_config: config.blooms,
-enabled: enabled,
+enabled: config.enabled,
extras: extras, extras: extras,
-};
-Ok(db)
+}
} }
fn cache_size(&self) -> usize { fn cache_size(&self) -> usize {
@ -419,7 +401,7 @@ mod tests {
use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction}; use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
use devtools::RandomTempPath; use devtools::RandomTempPath;
use header::BlockNumber; use header::BlockNumber;
-use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
+use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError}; use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
use trace::trace::{Call, Action, Res}; use trace::trace::{Call, Action, Res};
use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
@ -474,22 +456,10 @@ mod tests {
let mut config = Config::default(); let mut config = Config::default();
// set autotracing // set autotracing
-config.enabled = Switch::Auto;
+config.enabled = false;
{ {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
assert_eq!(tracedb.tracing_enabled(), false);
}
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
config.enabled = Switch::Off;
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false); assert_eq!(tracedb.tracing_enabled(), false);
} }
} }
@ -501,50 +471,12 @@ mod tests {
let mut config = Config::default(); let mut config = Config::default();
// set tracing on // set tracing on
-config.enabled = Switch::On;
+config.enabled = true;
{ {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
assert_eq!(tracedb.tracing_enabled(), true); assert_eq!(tracedb.tracing_enabled(), true);
} }
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::Auto;
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::Off;
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), false);
}
}
#[test]
#[should_panic]
fn test_invalid_reopening_db() {
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let mut config = Config::default();
// set tracing on
config.enabled = Switch::Off;
{
let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
assert_eq!(tracedb.tracing_enabled(), true);
}
config.enabled = Switch::On;
TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
} }
fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
@ -595,7 +527,7 @@ mod tests {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap()); let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap());
let mut config = Config::default(); let mut config = Config::default();
-config.enabled = Switch::On;
+config.enabled = true;
let block_0 = H256::from(0xa1); let block_0 = H256::from(0xa1);
let block_1 = H256::from(0xa2); let block_1 = H256::from(0xa2);
let tx_0 = H256::from(0xff); let tx_0 = H256::from(0xff);
@ -607,7 +539,7 @@ mod tests {
extras.transaction_hashes.insert(0, vec![tx_0.clone()]); extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
extras.transaction_hashes.insert(1, vec![tx_1.clone()]); extras.transaction_hashes.insert(1, vec![tx_1.clone()]);
-let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
+let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras));
// import block 0 // import block 0
let request = create_simple_import_request(0, block_0.clone()); let request = create_simple_import_request(0, block_0.clone());
@ -679,10 +611,10 @@ mod tests {
extras.transaction_hashes.insert(0, vec![tx_0.clone()]); extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
// set tracing on // set tracing on
-config.enabled = Switch::On;
+config.enabled = true;
{ {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone()));
// import block 0 // import block 0
let request = create_simple_import_request(0, block_0.clone()); let request = create_simple_import_request(0, block_0.clone());
@ -692,7 +624,7 @@ mod tests {
} }
{ {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras));
let traces = tracedb.transaction_traces(0, 0); let traces = tracedb.transaction_traces(0, 0);
assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]); assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]);
} }
@ -26,7 +26,7 @@ mod noop_tracer;
pub use types::trace_types::{filter, flat, localized, trace}; pub use types::trace_types::{filter, flat, localized, trace};
pub use types::trace_types::error::Error as TraceError; pub use types::trace_types::error::Error as TraceError;
-pub use self::config::{Config, Switch};
+pub use self::config::Config;
pub use self::db::TraceDB; pub use self::db::TraceDB;
pub use self::error::Error; pub use self::error::Error;
pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff}; pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff};
@ -41,6 +41,12 @@ pub struct Filter {
/// If None, match all. /// If None, match all.
/// If specified, log must contain one of these topics. /// If specified, log must contain one of these topics.
pub topics: Vec<Option<Vec<H256>>>, pub topics: Vec<Option<Vec<H256>>>,
/// Logs limit
///
/// If None, return all logs
/// If specified, should only return *last* `n` logs.
pub limit: Option<usize>,
} }
impl Clone for Filter { impl Clone for Filter {
@ -59,7 +65,8 @@ impl Clone for Filter {
from_block: self.from_block.clone(), from_block: self.from_block.clone(),
to_block: self.to_block.clone(), to_block: self.to_block.clone(),
address: self.address.clone(), address: self.address.clone(),
-topics: topics[..].to_vec()
+topics: topics[..].to_vec(),
+limit: self.limit,
} }
} }
} }
@ -117,6 +124,7 @@ mod tests {
to_block: BlockID::Latest, to_block: BlockID::Latest,
address: None, address: None,
topics: vec![None, None, None, None], topics: vec![None, None, None, None],
limit: None,
}; };
let possibilities = none_filter.bloom_possibilities(); let possibilities = none_filter.bloom_possibilities();
@ -136,7 +144,8 @@ mod tests {
None, None,
None, None,
None, None,
-]
+],
+limit: None,
}; };
let possibilities = filter.bloom_possibilities(); let possibilities = filter.bloom_possibilities();
@ -154,7 +163,8 @@ mod tests {
Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]),
None, None,
None, None,
-]
+],
+limit: None,
}; };
let possibilities = filter.bloom_possibilities(); let possibilities = filter.bloom_possibilities();
@ -181,7 +191,8 @@ mod tests {
]), ]),
Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]),
None None
-]
+],
+limit: None,
}; };
// number of possibilities should be equal 2 * 2 * 2 * 1 = 8
@ -201,7 +212,8 @@ mod tests {
Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into()]), Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into()]),
None, None,
None, None,
-]
+],
+limit: None,
}; };
let entry0 = LogEntry { let entry0 = LogEntry {
@ -21,7 +21,7 @@ use std::cell::*;
use rlp::*; use rlp::*;
use util::sha3::Hashable; use util::sha3::Hashable;
use util::{H256, Address, U256, Bytes}; use util::{H256, Address, U256, Bytes};
-use ethkey::{Signature, sign, Secret, recover, public_to_address, Error as EthkeyError};
+use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError};
use error::*; use error::*;
use evm::Schedule; use evm::Schedule;
use header::BlockNumber; use header::BlockNumber;
@ -305,13 +305,18 @@ impl SignedTransaction {
match sender { match sender {
Some(s) => Ok(s), Some(s) => Ok(s),
None => { None => {
-let s = public_to_address(&try!(recover(&self.signature(), &self.unsigned.hash())));
+let s = public_to_address(&try!(self.public_key()));
self.sender.set(Some(s)); self.sender.set(Some(s));
Ok(s) Ok(s)
} }
} }
} }
/// Returns the public key of the sender.
pub fn public_key(&self) -> Result<Public, Error> {
Ok(try!(recover(&self.signature(), &self.unsigned.hash())))
}
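sender() now goes through public_key(), so signature recovery lives in one place and the recovered address is still memoized in the sender Cell. A standalone sketch of that Cell-based memoization (the expensive step stands in for recover-plus-hash):

use std::cell::Cell;

struct Memo {
	cached: Cell<Option<u64>>,
}

impl Memo {
	// First call computes and stores; later calls return the cached value.
	fn get(&self) -> u64 {
		match self.cached.get() {
			Some(v) => v,
			None => {
				let v = expensive(); // recovery + hashing in the real code
				self.cached.set(Some(v));
				v
			}
		}
	}
}

fn expensive() -> u64 { 42 }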
/// Do basic validation, checking for valid signature and minimum gas, /// Do basic validation, checking for valid signature and minimum gas,
// TODO: consider use in block validation. // TODO: consider use in block validation.
#[cfg(test)] #[cfg(test)]
@ -215,7 +215,7 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> {
fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: &H256) -> Result<(), Error> { fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: &H256) -> Result<(), Error> {
let block = UntrustedRlp::new(block); let block = UntrustedRlp::new(block);
let tx = try!(block.at(1)); let tx = try!(block.at(1));
-let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here
+let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here
if expected_root != transactions_root { if expected_root != transactions_root {
return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() }))) return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() })))
} }
@ -241,6 +241,7 @@ mod tests {
use spec::*; use spec::*;
use transaction::*; use transaction::*;
use tests::helpers::*; use tests::helpers::*;
use types::log_entry::{LogEntry, LocalizedLogEntry};
use rlp::View; use rlp::View;
fn check_ok(result: Result<(), Error>) { fn check_ok(result: Result<(), Error>) {
@ -333,6 +334,12 @@ mod tests {
fn block_receipts(&self, _hash: &H256) -> Option<BlockReceipts> { fn block_receipts(&self, _hash: &H256) -> Option<BlockReceipts> {
unimplemented!() unimplemented!()
} }
fn logs<F>(&self, _blocks: Vec<BlockNumber>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
where F: Fn(&LogEntry) -> bool, Self: Sized {
unimplemented!()
}
} }
fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> { fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> {
@ -415,7 +422,7 @@ mod tests {
let mut uncles_rlp = RlpStream::new(); let mut uncles_rlp = RlpStream::new();
uncles_rlp.append(&good_uncles); uncles_rlp.append(&good_uncles);
let good_uncles_hash = uncles_rlp.as_raw().sha3(); let good_uncles_hash = uncles_rlp.as_raw().sha3();
-let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::<SignedTransaction>(t).to_vec()).collect());
+let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::<SignedTransaction>(t).to_vec()));
let mut parent = good.clone(); let mut parent = good.clone();
parent.set_number(9); parent.set_number(9);
@ -8,5 +8,5 @@ rust-crypto = "0.2.36"
tiny-keccak = "1.0" tiny-keccak = "1.0"
eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" }
ethkey = { path = "../ethkey" } ethkey = { path = "../ethkey" }
bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }
@ -16,12 +16,13 @@
//! Crypto utils used ethstore and network. //! Crypto utils used ethstore and network.
-extern crate bigint;
+extern crate ethcore_bigint as bigint;
extern crate tiny_keccak; extern crate tiny_keccak;
extern crate crypto as rcrypto; extern crate crypto as rcrypto;
extern crate secp256k1; extern crate secp256k1;
extern crate ethkey; extern crate ethkey;
use std::fmt;
use tiny_keccak::Keccak; use tiny_keccak::Keccak;
use rcrypto::pbkdf2::pbkdf2; use rcrypto::pbkdf2::pbkdf2;
use rcrypto::scrypt::{scrypt, ScryptParams}; use rcrypto::scrypt::{scrypt, ScryptParams};
@ -39,6 +40,17 @@ pub enum Error {
InvalidMessage, InvalidMessage,
} }
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let s = match *self {
Error::Secp(ref err) => err.to_string(),
Error::InvalidMessage => "Invalid message".into(),
};
write!(f, "{}", s)
}
}
impl From<SecpError> for Error { impl From<SecpError> for Error {
fn from(e: SecpError) -> Self { fn from(e: SecpError) -> Self {
Error::Secp(e) Error::Secp(e)
@ -1,7 +1,7 @@
[package] [package]
name = "ethkey" name = "ethkey"
version = "0.2.0" version = "0.2.0"
authors = ["debris <marek.kotewicz@gmail.com>"] authors = ["Ethcore <admin@ethcore.io>"]
[dependencies] [dependencies]
rand = "0.3.14" rand = "0.3.14"
@ -10,7 +10,7 @@ tiny-keccak = "1.0"
eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" }
rustc-serialize = "0.3" rustc-serialize = "0.3"
docopt = { version = "0.6", optional = true } docopt = { version = "0.6", optional = true }
bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }
[features] [features]
default = [] default = []
@ -20,7 +20,7 @@ extern crate lazy_static;
extern crate tiny_keccak; extern crate tiny_keccak;
extern crate secp256k1; extern crate secp256k1;
extern crate rustc_serialize; extern crate rustc_serialize;
-extern crate bigint;
+extern crate ethcore_bigint as bigint;
mod brain; mod brain;
mod error; mod error;
@ -16,7 +16,7 @@
use ethkey::{KeyPair, sign, Address, Secret, Signature, Message}; use ethkey::{KeyPair, sign, Address, Secret, Signature, Message};
use {json, Error, crypto}; use {json, Error, crypto};
-use crypto::Keccak256;
+use crypto::{Keccak256};
use random::Random; use random::Random;
use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf}; use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf};
@ -170,6 +170,11 @@ impl SafeAccount {
sign(&secret, message).map_err(From::from) sign(&secret, message).map_err(From::from)
} }
pub fn decrypt(&self, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let secret = try!(self.crypto.secret(password));
crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from)
}
pub fn change_password(&self, old_password: &str, new_password: &str, iterations: u32) -> Result<Self, Error> { pub fn change_password(&self, old_password: &str, new_password: &str, iterations: u32) -> Result<Self, Error> {
let secret = try!(self.crypto.secret(old_password)); let secret = try!(self.crypto.secret(old_password));
let result = SafeAccount { let result = SafeAccount {
@ -17,6 +17,7 @@
use std::fmt; use std::fmt;
use std::io::Error as IoError; use std::io::Error as IoError;
use ethkey::Error as EthKeyError; use ethkey::Error as EthKeyError;
use crypto::Error as EthCryptoError;
#[derive(Debug)] #[derive(Debug)]
pub enum Error { pub enum Error {
@ -28,6 +29,7 @@ pub enum Error {
InvalidKeyFile(String), InvalidKeyFile(String),
CreationFailed, CreationFailed,
EthKey(EthKeyError), EthKey(EthKeyError),
EthCrypto(EthCryptoError),
Custom(String), Custom(String),
} }
@ -42,6 +44,7 @@ impl fmt::Display for Error {
Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason), Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason),
Error::CreationFailed => "Account creation failed".into(), Error::CreationFailed => "Account creation failed".into(),
Error::EthKey(ref err) => err.to_string(), Error::EthKey(ref err) => err.to_string(),
Error::EthCrypto(ref err) => err.to_string(),
Error::Custom(ref s) => s.clone(), Error::Custom(ref s) => s.clone(),
}; };
@ -60,3 +63,9 @@ impl From<EthKeyError> for Error {
Error::EthKey(err) Error::EthKey(err)
} }
} }
impl From<EthCryptoError> for Error {
fn from(err: EthCryptoError) -> Self {
Error::EthCrypto(err)
}
}
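With From<EthCryptoError> in place, try! inside the new decrypt path converts crypto failures into ethstore's Error automatically. A standalone sketch of that wiring with hypothetical error types:

#[derive(Debug)]
struct CryptoError;

#[derive(Debug)]
enum StoreError { Crypto(CryptoError) }

impl From<CryptoError> for StoreError {
	fn from(e: CryptoError) -> Self { StoreError::Crypto(e) }
}

fn low_level_decrypt() -> Result<Vec<u8>, CryptoError> { Ok(vec![]) }

// try! applies From::from to the error before returning it.
fn decrypt() -> Result<Vec<u8>, StoreError> {
	Ok(try!(low_level_decrypt()))
}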
@ -144,6 +144,11 @@ impl SecretStore for EthStore {
account.sign(password, message) account.sign(password, message)
} }
fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let account = try!(self.get(account));
account.decrypt(password, shared_mac, message)
}
fn uuid(&self, address: &Address) -> Result<UUID, Error> { fn uuid(&self, address: &Address) -> Result<UUID, Error> {
let account = try!(self.get(address)); let account = try!(self.get(address));
Ok(account.id.into()) Ok(account.id.into())
@ -20,33 +20,24 @@ use json::UUID;
pub trait SecretStore: Send + Sync { pub trait SecretStore: Send + Sync {
fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error>; fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error>;
fn import_presale(&self, json: &[u8], password: &str) -> Result<Address, Error>; fn import_presale(&self, json: &[u8], password: &str) -> Result<Address, Error>;
fn import_wallet(&self, json: &[u8], password: &str) -> Result<Address, Error>; fn import_wallet(&self, json: &[u8], password: &str) -> Result<Address, Error>;
fn accounts(&self) -> Result<Vec<Address>, Error>;
fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>; fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>;
fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>; fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>;
fn sign(&self, account: &Address, password: &str, message: &Message) -> Result<Signature, Error>; fn sign(&self, account: &Address, password: &str, message: &Message) -> Result<Signature, Error>;
fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error>;
fn accounts(&self) -> Result<Vec<Address>, Error>;
fn uuid(&self, account: &Address) -> Result<UUID, Error>; fn uuid(&self, account: &Address) -> Result<UUID, Error>;
fn name(&self, account: &Address) -> Result<String, Error>; fn name(&self, account: &Address) -> Result<String, Error>;
fn meta(&self, account: &Address) -> Result<String, Error>; fn meta(&self, account: &Address) -> Result<String, Error>;
fn set_name(&self, address: &Address, name: String) -> Result<(), Error>; fn set_name(&self, address: &Address, name: String) -> Result<(), Error>;
fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error>; fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error>;
fn local_path(&self) -> String; fn local_path(&self) -> String;
fn list_geth_accounts(&self, testnet: bool) -> Vec<Address>; fn list_geth_accounts(&self, testnet: bool) -> Vec<Address>;
fn import_geth_accounts(&self, desired: Vec<Address>, testnet: bool) -> Result<Vec<Address>, Error>; fn import_geth_accounts(&self, desired: Vec<Address>, testnet: bool) -> Result<Vec<Address>, Error>;

@ -240,7 +240,7 @@ mod tests {
::std::thread::spawn(move || { ::std::thread::spawn(move || {
while !hypervisor_ready.load(Ordering::Relaxed) { } while !hypervisor_ready.load(Ordering::Relaxed) { }
-let client = nanoipc::init_client::<HypervisorServiceClient<_>>(url).unwrap();
+let client = nanoipc::fast_client::<HypervisorServiceClient<_>>(url).unwrap();
client.handshake().unwrap(); client.handshake().unwrap();
client.module_ready(test_module_id); client.module_ready(test_module_id);
}); });
@ -110,7 +110,7 @@ impl HypervisorService {
let modules = self.modules.read().unwrap(); let modules = self.modules.read().unwrap();
modules.get(&module_id).map(|module| { modules.get(&module_id).map(|module| {
trace!(target: "hypervisor", "Sending shutdown to {}({})", module_id, &module.control_url); trace!(target: "hypervisor", "Sending shutdown to {}({})", module_id, &module.control_url);
-let client = nanoipc::init_client::<ControlServiceClient<_>>(&module.control_url).unwrap();
+let client = nanoipc::fast_client::<ControlServiceClient<_>>(&module.control_url).unwrap();
client.shutdown(); client.shutdown();
trace!(target: "hypervisor", "Sent shutdown to {}", module_id); trace!(target: "hypervisor", "Sent shutdown to {}", module_id);
}); });
@ -10,4 +10,4 @@ license = "GPL-3.0"
ethcore-ipc = { path = "../rpc" } ethcore-ipc = { path = "../rpc" }
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" } nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
log = "0.3" log = "0.3"
lazy_static = "0.2"
@ -19,6 +19,7 @@
extern crate ethcore_ipc as ipc; extern crate ethcore_ipc as ipc;
extern crate nanomsg; extern crate nanomsg;
#[macro_use] extern crate log; #[macro_use] extern crate log;
#[macro_use] extern crate lazy_static;
pub use ipc::{WithSocket, IpcInterface, IpcConfig}; pub use ipc::{WithSocket, IpcInterface, IpcConfig};
pub use nanomsg::Socket as NanoSocket; pub use nanomsg::Socket as NanoSocket;
@ -28,7 +29,8 @@ use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut}
use std::ops::Deref; use std::ops::Deref;
const POLL_TIMEOUT: isize = 200; const POLL_TIMEOUT: isize = 200;
-const CLIENT_CONNECTION_TIMEOUT: isize = 120000;
+const DEFAULT_CONNECTION_TIMEOUT: isize = 30000;
+const DEBUG_CONNECTION_TIMEOUT: isize = 5000;
/// Generic worker to handle service (binded) sockets /// Generic worker to handle service (binded) sockets
pub struct Worker<S: ?Sized> where S: IpcInterface { pub struct Worker<S: ?Sized> where S: IpcInterface {
@ -68,7 +70,7 @@ pub fn init_duplex_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, Sock
SocketError::DuplexLink SocketError::DuplexLink
})); }));
-socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap();
+socket.set_receive_timeout(DEFAULT_CONNECTION_TIMEOUT).unwrap();
let endpoint = try!(socket.connect(socket_addr).map_err(|e| { let endpoint = try!(socket.connect(socket_addr).map_err(|e| {
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e); warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e);
@ -84,26 +86,58 @@ pub fn init_duplex_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, Sock
/// Spawns client <`S`> over specified address /// Spawns client <`S`> over specified address
/// creates socket and connects endpoint to it /// creates socket and connects endpoint to it
/// for request-reply connections to the service /// for request-reply connections to the service
-pub fn init_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
+pub fn client<S>(socket_addr: &str, receive_timeout: Option<isize>) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
let mut socket = try!(Socket::new(Protocol::Req).map_err(|e| { let mut socket = try!(Socket::new(Protocol::Req).map_err(|e| {
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e); warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
SocketError::RequestLink SocketError::RequestLink
})); }));
-socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap();
+if let Some(timeout) = receive_timeout {
+socket.set_receive_timeout(timeout).unwrap();
+}
let endpoint = try!(socket.connect(socket_addr).map_err(|e| { let endpoint = try!(socket.connect(socket_addr).map_err(|e| {
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e); warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e);
SocketError::RequestLink SocketError::RequestLink
})); }));
trace!(target: "ipc", "Created cleint for {}", socket_addr); trace!(target: "ipc", "Created client for {}", socket_addr);
Ok(GuardedSocket { Ok(GuardedSocket {
client: Arc::new(S::init(socket)), client: Arc::new(S::init(socket)),
_endpoint: endpoint, _endpoint: endpoint,
}) })
} }
lazy_static! {
/// Set PARITY_IPC_DEBUG=1 for fail-fast connectivity problems diagnostic
pub static ref DEBUG_FLAG: bool = {
use std::env;
if let Ok(debug) = env::var("PARITY_IPC_DEBUG") {
debug == "1" || debug.to_uppercase() == "TRUE"
}
else { false }
};
}
/// Client with no default timeout on operations
pub fn generic_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
if *DEBUG_FLAG {
client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT))
} else {
client(socket_addr, None)
}
}
/// Client over interface that is supposed to give quick almost non-blocking responses
pub fn fast_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
if *DEBUG_FLAG {
client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT))
} else {
client(socket_addr, Some(DEFAULT_CONNECTION_TIMEOUT))
}
}
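The split gives callers two profiles: generic_client blocks indefinitely unless the debug flag is set, while fast_client always bounds a receive. A standalone sketch of how the timeout gets selected (constants as defined above):

use std::env;

fn debug_flag() -> bool {
	match env::var("PARITY_IPC_DEBUG") {
		Ok(v) => v == "1" || v.to_uppercase() == "TRUE",
		Err(_) => false,
	}
}

// None means "no receive timeout", as in generic_client above.
fn receive_timeout(fast: bool) -> Option<isize> {
	const DEFAULT_CONNECTION_TIMEOUT: isize = 30000;
	const DEBUG_CONNECTION_TIMEOUT: isize = 5000;
	if debug_flag() {
		Some(DEBUG_CONNECTION_TIMEOUT)
	} else if fast {
		Some(DEFAULT_CONNECTION_TIMEOUT)
	} else {
		None
	}
}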
/// Error occurred while establishing socket or endpoint
#[derive(Debug)] #[derive(Debug)]
pub enum SocketError { pub enum SocketError {
@ -10,7 +10,7 @@ rustc-serialize = "0.3"
serde = "0.8" serde = "0.8"
serde_json = "0.8" serde_json = "0.8"
serde_macros = { version = "0.8", optional = true } serde_macros = { version = "0.8", optional = true }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
[build-dependencies] [build-dependencies]
serde_codegen = { version = "0.8", optional = true } serde_codegen = { version = "0.8", optional = true }
@ -17,23 +17,25 @@
use std::str::{FromStr, from_utf8}; use std::str::{FromStr, from_utf8};
use std::{io, fs}; use std::{io, fs};
use std::io::{BufReader, BufRead}; use std::io::{BufReader, BufRead};
-use std::time::Duration;
+use std::time::{Instant, Duration};
use std::thread::sleep; use std::thread::sleep;
use std::sync::Arc; use std::sync::Arc;
use rustc_serialize::hex::FromHex; use rustc_serialize::hex::FromHex;
use ethcore_logger::{setup_log, Config as LogConfig}; use ethcore_logger::{setup_log, Config as LogConfig};
use io::{PanicHandler, ForwardPanic}; use io::{PanicHandler, ForwardPanic};
-use util::ToPretty;
+use util::{ToPretty, Uint};
use rlp::PayloadInfo; use rlp::PayloadInfo;
use ethcore::service::ClientService; use ethcore::service::ClientService;
-use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockID};
use ethcore::error::ImportError; use ethcore::error::ImportError;
use ethcore::miner::Miner; use ethcore::miner::Miner;
use cache::CacheConfig; use cache::CacheConfig;
-use informant::Informant;
-use params::{SpecType, Pruning};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
+use informant::{Informant, MillisecondDuration};
use io_handler::ImportIoHandler;
use helpers::{to_client_config, execute_upgrades}; use helpers::{to_client_config, execute_upgrades};
use dir::Directories; use dir::Directories;
use user_defaults::UserDefaults;
use fdlimit; use fdlimit;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -107,32 +109,49 @@ pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
} }
fn execute_import(cmd: ImportBlockchain) -> Result<String, String> { fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let timer = Instant::now();
// Setup panic handler // Setup panic handler
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
// Setup logging
let _logger = setup_log(&cmd.logger_config);
// create dirs used by parity
try!(cmd.dirs.create_dirs());
// load spec file // load spec file
let spec = try!(cmd.spec.spec()); let spec = try!(cmd.spec.spec());
// load genesis hash // load genesis hash
let genesis_hash = spec.genesis_header().hash(); let genesis_hash = spec.genesis_header().hash();
-// Setup logging
-let _logger = setup_log(&cmd.logger_config);
+// database paths
+let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+// user defaults path
+let user_defaults_path = db_dirs.user_defaults_path();
+// load user defaults
+let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
+// check if tracing is on
+let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
fdlimit::raise_fd_limit(); fdlimit::raise_fd_limit();
// select pruning algorithm // select pruning algorithm
-let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+let algorithm = cmd.pruning.to_algorithm(&user_defaults);
// prepare client and snapshot paths. // prepare client and snapshot paths.
-let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+let client_path = db_dirs.client_path(algorithm);
+let snapshot_path = db_dirs.snapshot_path();
// execute upgrades // execute upgrades
-try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
// prepare client config // prepare client config
-let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref());
+let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);
// build client // build client
let service = try!(ClientService::start( let service = try!(ClientService::start(
@ -170,6 +189,10 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color); let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color);
try!(service.register_io_handler(Arc::new(ImportIoHandler {
info: Arc::new(informant),
})).map_err(|_| "Unable to register informant handler".to_owned()));
let do_import = |bytes| { let do_import = |bytes| {
while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
match client.import_block(bytes) { match client.import_block(bytes) {
@ -181,7 +204,6 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
}, },
Ok(_) => {}, Ok(_) => {},
} }
informant.tick();
Ok(()) Ok(())
}; };
@ -215,14 +237,36 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
} }
client.flush_queue(); client.flush_queue();
Ok("Import completed.".into()) // save user defaults
user_defaults.pruning = algorithm;
user_defaults.tracing = tracing;
try!(user_defaults.save(&user_defaults_path));
let report = client.report();
let ms = timer.elapsed().as_milliseconds();
Ok(format!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s",
ms / 1000,
report.blocks_imported,
(report.blocks_imported * 1000) as u64 / ms,
report.transactions_applied,
(report.transactions_applied * 1000) as u64 / ms,
report.gas_processed / From::from(1_000_000),
(report.gas_processed / From::from(ms * 1000)).low_u64(),
).into())
} }
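The closing report derives every rate the same way: a raw total scaled by 1000 and divided by the elapsed milliseconds. A quick standalone check of the arithmetic with made-up figures:

// blk/s and tx/s as computed in the format! call above.
fn rates(blocks: u64, txs: u64, ms: u64) -> (u64, u64) {
	((blocks * 1000) / ms, (txs * 1000) / ms)
}

fn main() {
	// 400 blocks carrying 2000 transactions imported in 8 seconds:
	assert_eq!(rates(400, 2000, 8000), (50, 250));
}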
fn execute_export(cmd: ExportBlockchain) -> Result<String, String> { fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
// Setup panic handler // Setup panic handler
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
-let format = cmd.format.unwrap_or_else(Default::default);
+// Setup logging
+let _logger = setup_log(&cmd.logger_config);
+// create dirs used by parity
+try!(cmd.dirs.create_dirs());
+let format = cmd.format.unwrap_or_default();
// load spec file // load spec file
let spec = try!(cmd.spec.spec()); let spec = try!(cmd.spec.spec());
@ -230,23 +274,32 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
// load genesis hash // load genesis hash
let genesis_hash = spec.genesis_header().hash(); let genesis_hash = spec.genesis_header().hash();
-// Setup logging
-let _logger = setup_log(&cmd.logger_config);
+// database paths
+let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+// user defaults path
+let user_defaults_path = db_dirs.user_defaults_path();
+// load user defaults
+let user_defaults = try!(UserDefaults::load(&user_defaults_path));
+// check if tracing is on
+let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
fdlimit::raise_fd_limit(); fdlimit::raise_fd_limit();
// select pruning algorithm // select pruning algorithm
-let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+let algorithm = cmd.pruning.to_algorithm(&user_defaults);
// prepare client and snapshot paths. // prepare client and snapshot paths.
-let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+let client_path = db_dirs.client_path(algorithm);
+let snapshot_path = db_dirs.snapshot_path();
// execute upgrades // execute upgrades
-try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
// prepare client config // prepare client config
-let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
+let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);
let service = try!(ClientService::start( let service = try!(ClientService::start(
client_config, client_config,
@ -266,10 +319,10 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
}; };
let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found")); let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found"));
let to = try!(client.block_number(cmd.to_block).ok_or("From block could not be found")); let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found"));
for i in from..(to + 1) { for i in from..(to + 1) {
let b = client.block(BlockID::Number(i)).unwrap(); let b = try!(client.block(BlockID::Number(i)).ok_or("Error exporting incomplete chain"));
match format { match format {
DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); } DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }
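The switch from .unwrap() to try!(... .ok_or(...)) in the export loop above is what turns a gap in the exported range into a clean error instead of a panic. A minimal illustration of the Option-to-Result conversion involved (illustrative names, not from the codebase):

// Illustrative only: ok_or maps Some(b) to Ok(b) and None to Err(msg),
// so try! can propagate the error string to the caller.
fn require_block(b: Option<Vec<u8>>) -> Result<Vec<u8>, &'static str> {
    b.ok_or("Error exporting incomplete chain")
}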
@@ -54,16 +54,14 @@ pub fn payload<B: ipc::BinaryConvertable>() -> Result<B, BootError> {
	let mut buffer = Vec::new();
	try!(
-		io::stdin().read_to_end(&mut buffer)
-			.map_err(|io_err| BootError::ReadArgs(io_err))
+		io::stdin().read_to_end(&mut buffer).map_err(BootError::ReadArgs)
	);
-	ipc::binary::deserialize::<B>(&buffer)
-		.map_err(|binary_error| BootError::DecodeArgs(binary_error))
+	ipc::binary::deserialize::<B>(&buffer).map_err(BootError::DecodeArgs)
}

pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket<HypervisorServiceClient<NanoSocket>>{
-	let hypervisor_client = nanoipc::init_client::<HypervisorServiceClient<_>>(hv_url).unwrap();
+	let hypervisor_client = nanoipc::fast_client::<HypervisorServiceClient<_>>(hv_url).unwrap();
	hypervisor_client.handshake().unwrap();
	hypervisor_client.module_ready(module_id, control_url.to_owned());
@@ -73,7 +71,7 @@ pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> Guar
pub fn dependency<C: WithSocket<NanoSocket>>(url: &str)
	-> Result<GuardedSocket<C>, BootError>
{
-	nanoipc::init_client::<C>(url).map_err(|socket_err| BootError::DependencyConnect(socket_err))
+	nanoipc::generic_client::<C>(url).map_err(BootError::DependencyConnect)
}

pub fn main_thread() -> Arc<AtomicBool> {
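The point-free map_err rewrites above work because a tuple-struct enum variant is itself a function of its payload in Rust, so map_err(BootError::ReadArgs) is equivalent to the closure form it replaces. A minimal standalone sketch (illustrative, kept in the try! style of the surrounding code):

// Illustrative only: BootError::ReadArgs has type fn(io::Error) -> BootError,
// so it can replace the closure |e| BootError::ReadArgs(e) verbatim.
use std::io;
use std::io::Read;

enum BootError { ReadArgs(io::Error) }

fn read_all<R: Read>(input: &mut R) -> Result<Vec<u8>, BootError> {
    let mut buf = Vec::new();
    try!(input.read_to_end(&mut buf).map_err(BootError::ReadArgs));
    Ok(buf)
}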
parity/cli/config.full.toml Normal file (name inferred from include_str!("./config.full.toml") in the tests below)
@@ -0,0 +1,98 @@
[parity]
mode = "active"
mode_timeout = 300
mode_alarm = 3600
chain = "homestead"
db_path = "$HOME/.parity"
keys_path = "$HOME/.parity/keys"
identity = ""
[account]
unlock = ["0xdeadbeefcafe0000000000000000000000000000"]
password = ["~/.safe/password.file"]
keys_iterations = 10240
[signer]
force = false
disable = false
port = 8180
interface = "127.0.0.1"
path = "$HOME/.parity/signer"
[network]
disable = false
port = 30303
min_peers = 25
max_peers = 50
nat = "any"
id = "0x1"
bootnodes = []
discovery = true
reserved_only = false
reserved_peers = "./path_to_file"
[rpc]
disable = false
port = 8545
interface = "local"
cors = "null"
apis = ["web3", "eth", "net", "personal", "ethcore", "traces", "rpc"]
hosts = ["none"]
[ipc]
disable = false
path = "$HOME/.parity/jsonrpc.ipc"
apis = ["web3", "eth", "net", "personal", "ethcore", "traces", "rpc"]
[dapps]
disable = false
port = 8080
interface = "local"
hosts = ["none"]
path = "$HOME/.parity/dapps"
# authorization:
user = "test_user"
pass = "test_pass"
[mining]
author = "0xdeadbeefcafe0000000000000000000000000001"
force_sealing = true
reseal_on_txs = "all"
reseal_min_period = 4000
work_queue_size = 20
relay_set = "cheap"
usd_per_tx = "0"
usd_per_eth = "auto"
price_update_period = "hourly"
gas_floor_target = "4700000"
gas_cap = "6283184"
tx_queue_size = 1024
tx_gas_limit = "6283184"
extra_data = "Parity"
remove_solved = false
notify_work = ["http://localhost:3001"]
[footprint]
tracing = "auto"
pruning = "auto"
cache_size_db = 64
cache_size_blocks = 8
cache_size_queue = 50
cache_size = 128 # Overrides above caches with total size
fast_and_loose = false
db_compaction = "ssd"
fat_db = false
[snapshots]
disable_periodic = false
[vm]
jit = false
[misc]
logging = "own_tx=trace"
log_file = "/var/log/parity.log"
color = true
parity/cli/config.invalid1.toml Normal file (name inferred from include_str! in the tests below)
@@ -0,0 +1,2 @@
[account
unlock = "0x1"
parity/cli/config.invalid2.toml Normal file (name inferred from include_str! in the tests below)
@@ -0,0 +1,4 @@
[account]
unlock = "0x1"
passwd = []
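These two fixtures feed should_parse_config_and_return_errors in parity/cli/mod.rs below: the first fails outright at the TOML parser (the [account table header is never closed), yielding ArgsError::Parsing, while the second is well-formed TOML that fails while decoding into the Config struct, yielding ArgsError::Decode, since unlock holds a plain string where a list of strings is expected.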
parity/cli/config.toml Normal file
@@ -0,0 +1,63 @@
[parity]
mode = "dark"
mode_timeout = 15
mode_alarm = 10
chain = "./chain.json"
[account]
unlock = ["0x1", "0x2", "0x3"]
password = ["passwdfile path"]
[signer]
disable = true
[network]
disable = false
discovery = true
nat = "any"
min_peers = 10
max_peers = 20
reserved_only = true
reserved_peers = "./path/to/reserved_peers"
[rpc]
disable = true
port = 8180
[ipc]
apis = ["rpc", "eth"]
[dapps]
port = 8080
user = "username"
pass = "password"
[mining]
author = "0xdeadbeefcafe0000000000000000000000000001"
force_sealing = true
reseal_on_txs = "all"
reseal_min_period = 4000
price_update_period = "hourly"
tx_queue_size = 2048
[footprint]
tracing = "on"
pruning = "fast"
cache_size_db = 128
cache_size_blocks = 16
cache_size_queue = 100
db_compaction = "ssd"
fat_db = true
[snapshots]
disable_periodic = true
[vm]
jit = false
[misc]
logging = "own_tx=trace"
log_file = "/var/log/parity.log"
color = true
parity/cli/mod.rs Normal file
@@ -0,0 +1,705 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[macro_use]
mod usage;
usage! {
{
// Commands
cmd_daemon: bool,
cmd_wallet: bool,
cmd_account: bool,
cmd_new: bool,
cmd_list: bool,
cmd_export: bool,
cmd_import: bool,
cmd_signer: bool,
cmd_new_token: bool,
cmd_snapshot: bool,
cmd_restore: bool,
cmd_ui: bool,
cmd_tools: bool,
cmd_hash: bool,
// Arguments
arg_pid_file: String,
arg_file: Option<String>,
arg_path: Vec<String>,
// Flags
// -- Legacy Options
flag_geth: bool,
flag_testnet: bool,
flag_import_geth_keys: bool,
flag_datadir: Option<String>,
flag_networkid: Option<String>,
flag_peers: Option<u16>,
flag_nodekey: Option<String>,
flag_nodiscover: bool,
flag_jsonrpc: bool,
flag_jsonrpc_off: bool,
flag_webapp: bool,
flag_dapps_off: bool,
flag_rpc: bool,
flag_rpcaddr: Option<String>,
flag_rpcport: Option<u16>,
flag_rpcapi: Option<String>,
flag_rpccorsdomain: Option<String>,
flag_ipcdisable: bool,
flag_ipc_off: bool,
flag_ipcapi: Option<String>,
flag_ipcpath: Option<String>,
flag_gasprice: Option<String>,
flag_etherbase: Option<String>,
flag_extradata: Option<String>,
flag_cache: Option<u32>,
// -- Miscellaneous Options
flag_version: bool,
flag_no_config: bool,
}
{
// -- Operating Options
flag_mode: String = "active", or |c: &Config| otry!(c.parity).mode.clone(),
flag_mode_timeout: u64 = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(),
flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(),
flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(),
flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(),
flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(),
// -- Account Options
flag_unlock: Option<String> = None,
or |c: &Config| otry!(c.account).unlock.clone().map(|vec| Some(vec.join(","))),
flag_password: Vec<String> = Vec::new(),
or |c: &Config| otry!(c.account).password.clone(),
flag_keys_iterations: u32 = 10240u32,
or |c: &Config| otry!(c.account).keys_iterations.clone(),
flag_force_signer: bool = false,
or |c: &Config| otry!(c.signer).force.clone(),
flag_no_signer: bool = false,
or |c: &Config| otry!(c.signer).disable.clone(),
flag_signer_port: u16 = 8180u16,
or |c: &Config| otry!(c.signer).port.clone(),
flag_signer_interface: String = "local",
or |c: &Config| otry!(c.signer).interface.clone(),
flag_signer_path: String = "$HOME/.parity/signer",
or |c: &Config| otry!(c.signer).path.clone(),
// NOTE [todr] For security reasons don't put this in config files
flag_signer_no_validation: bool = false, or |_| None,
// -- Networking Options
flag_no_network: bool = false,
or |c: &Config| otry!(c.network).disable.clone(),
flag_port: u16 = 30303u16,
or |c: &Config| otry!(c.network).port.clone(),
flag_min_peers: u16 = 25u16,
or |c: &Config| otry!(c.network).min_peers.clone(),
flag_max_peers: u16 = 50u16,
or |c: &Config| otry!(c.network).max_peers.clone(),
flag_nat: String = "any",
or |c: &Config| otry!(c.network).nat.clone(),
flag_network_id: Option<String> = None,
or |c: &Config| otry!(c.network).id.clone().map(Some),
flag_bootnodes: Option<String> = None,
or |c: &Config| otry!(c.network).bootnodes.clone().map(|vec| Some(vec.join(","))),
flag_no_discovery: bool = false,
or |c: &Config| otry!(c.network).discovery.map(|d| !d).clone(),
flag_node_key: Option<String> = None,
or |c: &Config| otry!(c.network).node_key.clone().map(Some),
flag_reserved_peers: Option<String> = None,
or |c: &Config| otry!(c.network).reserved_peers.clone().map(Some),
flag_reserved_only: bool = false,
or |c: &Config| otry!(c.network).reserved_only.clone(),
// -- API and Console Options
// RPC
flag_no_jsonrpc: bool = false,
or |c: &Config| otry!(c.rpc).disable.clone(),
flag_jsonrpc_port: u16 = 8545u16,
or |c: &Config| otry!(c.rpc).port.clone(),
flag_jsonrpc_interface: String = "local",
or |c: &Config| otry!(c.rpc).interface.clone(),
flag_jsonrpc_cors: Option<String> = None,
or |c: &Config| otry!(c.rpc).cors.clone().map(Some),
flag_jsonrpc_apis: String = "web3,eth,net,ethcore,personal,traces,rpc",
or |c: &Config| otry!(c.rpc).apis.clone().map(|vec| vec.join(",")),
flag_jsonrpc_hosts: String = "none",
or |c: &Config| otry!(c.rpc).hosts.clone().map(|vec| vec.join(",")),
// IPC
flag_no_ipc: bool = false,
or |c: &Config| otry!(c.ipc).disable.clone(),
flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc",
or |c: &Config| otry!(c.ipc).path.clone(),
flag_ipc_apis: String = "web3,eth,net,ethcore,personal,traces,rpc",
or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")),
// DAPPS
flag_no_dapps: bool = false,
or |c: &Config| otry!(c.dapps).disable.clone(),
flag_dapps_port: u16 = 8080u16,
or |c: &Config| otry!(c.dapps).port.clone(),
flag_dapps_interface: String = "local",
or |c: &Config| otry!(c.dapps).interface.clone(),
flag_dapps_hosts: String = "none",
or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")),
flag_dapps_path: String = "$HOME/.parity/dapps",
or |c: &Config| otry!(c.dapps).path.clone(),
flag_dapps_user: Option<String> = None,
or |c: &Config| otry!(c.dapps).user.clone().map(Some),
flag_dapps_pass: Option<String> = None,
or |c: &Config| otry!(c.dapps).pass.clone().map(Some),
// -- Sealing/Mining Options
flag_author: Option<String> = None,
or |c: &Config| otry!(c.mining).author.clone().map(Some),
flag_force_sealing: bool = false,
or |c: &Config| otry!(c.mining).force_sealing.clone(),
flag_reseal_on_txs: String = "own",
or |c: &Config| otry!(c.mining).reseal_on_txs.clone(),
flag_reseal_min_period: u64 = 2000u64,
or |c: &Config| otry!(c.mining).reseal_min_period.clone(),
flag_work_queue_size: usize = 20usize,
or |c: &Config| otry!(c.mining).work_queue_size.clone(),
flag_tx_gas_limit: Option<String> = None,
or |c: &Config| otry!(c.mining).tx_gas_limit.clone().map(Some),
flag_relay_set: String = "cheap",
or |c: &Config| otry!(c.mining).relay_set.clone(),
flag_usd_per_tx: String = "0",
or |c: &Config| otry!(c.mining).usd_per_tx.clone(),
flag_usd_per_eth: String = "auto",
or |c: &Config| otry!(c.mining).usd_per_eth.clone(),
flag_price_update_period: String = "hourly",
or |c: &Config| otry!(c.mining).price_update_period.clone(),
flag_gas_floor_target: String = "4700000",
or |c: &Config| otry!(c.mining).gas_floor_target.clone(),
flag_gas_cap: String = "6283184",
or |c: &Config| otry!(c.mining).gas_cap.clone(),
flag_extra_data: Option<String> = None,
or |c: &Config| otry!(c.mining).extra_data.clone().map(Some),
flag_tx_queue_size: usize = 1024usize,
or |c: &Config| otry!(c.mining).tx_queue_size.clone(),
flag_remove_solved: bool = false,
or |c: &Config| otry!(c.mining).remove_solved.clone(),
flag_notify_work: Option<String> = None,
or |c: &Config| otry!(c.mining).notify_work.clone().map(|vec| Some(vec.join(","))),
// -- Footprint Options
flag_tracing: String = "auto",
or |c: &Config| otry!(c.footprint).tracing.clone(),
flag_pruning: String = "auto",
or |c: &Config| otry!(c.footprint).pruning.clone(),
flag_cache_size_db: u32 = 64u32,
or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
flag_cache_size_blocks: u32 = 8u32,
or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(),
flag_cache_size_queue: u32 = 50u32,
or |c: &Config| otry!(c.footprint).cache_size_queue.clone(),
flag_cache_size: Option<u32> = None,
or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some),
flag_fast_and_loose: bool = false,
or |c: &Config| otry!(c.footprint).fast_and_loose.clone(),
flag_db_compaction: String = "ssd",
or |c: &Config| otry!(c.footprint).db_compaction.clone(),
flag_fat_db: bool = false,
or |c: &Config| otry!(c.footprint).fat_db.clone(),
// -- Import/Export Options
flag_from: String = "1", or |_| None,
flag_to: String = "latest", or |_| None,
flag_format: Option<String> = None, or |_| None,
// -- Snapshot Options
flag_at: String = "latest", or |_| None,
flag_no_periodic_snapshot: bool = false,
or |c: &Config| otry!(c.snapshots).disable_periodic.clone(),
// -- Virtual Machine Options
flag_jitvm: bool = false,
or |c: &Config| otry!(c.vm).jit.clone(),
// -- Miscellaneous Options
flag_config: String = "$HOME/.parity/config.toml", or |_| None,
flag_logging: Option<String> = None,
or |c: &Config| otry!(c.misc).logging.clone().map(Some),
flag_log_file: Option<String> = None,
or |c: &Config| otry!(c.misc).log_file.clone().map(Some),
flag_no_color: bool = false,
or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(),
}
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Config {
parity: Option<Operating>,
account: Option<Account>,
signer: Option<Signer>,
network: Option<Network>,
rpc: Option<Rpc>,
ipc: Option<Ipc>,
dapps: Option<Dapps>,
mining: Option<Mining>,
footprint: Option<Footprint>,
snapshots: Option<Snapshots>,
vm: Option<VM>,
misc: Option<Misc>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Operating {
mode: Option<String>,
mode_timeout: Option<u64>,
mode_alarm: Option<u64>,
chain: Option<String>,
db_path: Option<String>,
keys_path: Option<String>,
identity: Option<String>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Account {
unlock: Option<Vec<String>>,
password: Option<Vec<String>>,
keys_iterations: Option<u32>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Signer {
force: Option<bool>,
disable: Option<bool>,
port: Option<u16>,
interface: Option<String>,
path: Option<String>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Network {
disable: Option<bool>,
port: Option<u16>,
min_peers: Option<u16>,
max_peers: Option<u16>,
nat: Option<String>,
id: Option<String>,
bootnodes: Option<Vec<String>>,
discovery: Option<bool>,
node_key: Option<String>,
reserved_peers: Option<String>,
reserved_only: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Rpc {
disable: Option<bool>,
port: Option<u16>,
interface: Option<String>,
cors: Option<String>,
apis: Option<Vec<String>>,
hosts: Option<Vec<String>>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Ipc {
disable: Option<bool>,
path: Option<String>,
apis: Option<Vec<String>>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Dapps {
disable: Option<bool>,
port: Option<u16>,
interface: Option<String>,
hosts: Option<Vec<String>>,
path: Option<String>,
user: Option<String>,
pass: Option<String>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Mining {
author: Option<String>,
force_sealing: Option<bool>,
reseal_on_txs: Option<String>,
reseal_min_period: Option<u64>,
work_queue_size: Option<usize>,
tx_gas_limit: Option<String>,
relay_set: Option<String>,
usd_per_tx: Option<String>,
usd_per_eth: Option<String>,
price_update_period: Option<String>,
gas_floor_target: Option<String>,
gas_cap: Option<String>,
extra_data: Option<String>,
tx_queue_size: Option<usize>,
remove_solved: Option<bool>,
notify_work: Option<Vec<String>>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Footprint {
tracing: Option<String>,
pruning: Option<String>,
fast_and_loose: Option<bool>,
cache_size: Option<u32>,
cache_size_db: Option<u32>,
cache_size_blocks: Option<u32>,
cache_size_queue: Option<u32>,
db_compaction: Option<String>,
fat_db: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Snapshots {
disable_periodic: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct VM {
jit: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Misc {
logging: Option<String>,
log_file: Option<String>,
color: Option<bool>,
}
#[cfg(test)]
mod tests {
use super::{
Args, ArgsError,
Config, Operating, Account, Signer, Network, Rpc, Ipc, Dapps, Mining, Footprint, Snapshots, VM, Misc
};
use toml;
#[test]
fn should_parse_args_and_include_config() {
// given
let mut config = Config::default();
let mut operating = Operating::default();
operating.chain = Some("morden".into());
config.parity = Some(operating);
// when
let args = Args::parse_with_config(&["parity"], config).unwrap();
// then
assert_eq!(args.flag_chain, "morden".to_owned());
}
#[test]
fn should_not_use_config_if_cli_is_provided() {
// given
let mut config = Config::default();
let mut operating = Operating::default();
operating.chain = Some("morden".into());
config.parity = Some(operating);
// when
let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap();
// then
assert_eq!(args.flag_chain, "xyz".to_owned());
}
#[test]
fn should_parse_full_config() {
// given
let config = toml::decode_str(include_str!("./config.full.toml")).unwrap();
// when
let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap();
// then
assert_eq!(args, Args {
// Commands
cmd_daemon: false,
cmd_wallet: false,
cmd_account: false,
cmd_new: false,
cmd_list: false,
cmd_export: false,
cmd_import: false,
cmd_signer: false,
cmd_new_token: false,
cmd_snapshot: false,
cmd_restore: false,
cmd_ui: false,
cmd_tools: false,
cmd_hash: false,
// Arguments
arg_pid_file: "".into(),
arg_file: None,
arg_path: vec![],
// -- Operating Options
flag_mode: "active".into(),
flag_mode_timeout: 300u64,
flag_mode_alarm: 3600u64,
flag_chain: "xyz".into(),
flag_db_path: "$HOME/.parity".into(),
flag_keys_path: "$HOME/.parity/keys".into(),
flag_identity: "".into(),
// -- Account Options
flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()),
flag_password: vec!["~/.safe/password.file".into()],
flag_keys_iterations: 10240u32,
flag_force_signer: false,
flag_no_signer: false,
flag_signer_port: 8180u16,
flag_signer_interface: "127.0.0.1".into(),
flag_signer_path: "$HOME/.parity/signer".into(),
flag_signer_no_validation: false,
// -- Networking Options
flag_no_network: false,
flag_port: 30303u16,
flag_min_peers: 25u16,
flag_max_peers: 50u16,
flag_nat: "any".into(),
flag_network_id: Some("0x1".into()),
flag_bootnodes: Some("".into()),
flag_no_discovery: false,
flag_node_key: None,
flag_reserved_peers: Some("./path_to_file".into()),
flag_reserved_only: false,
// -- API and Console Options
// RPC
flag_no_jsonrpc: false,
flag_jsonrpc_port: 8545u16,
flag_jsonrpc_interface: "local".into(),
flag_jsonrpc_cors: Some("null".into()),
flag_jsonrpc_apis: "web3,eth,net,personal,ethcore,traces,rpc".into(),
flag_jsonrpc_hosts: "none".into(),
// IPC
flag_no_ipc: false,
flag_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(),
flag_ipc_apis: "web3,eth,net,personal,ethcore,traces,rpc".into(),
// DAPPS
flag_no_dapps: false,
flag_dapps_port: 8080u16,
flag_dapps_interface: "local".into(),
flag_dapps_hosts: "none".into(),
flag_dapps_path: "$HOME/.parity/dapps".into(),
flag_dapps_user: Some("test_user".into()),
flag_dapps_pass: Some("test_pass".into()),
// -- Sealing/Mining Options
flag_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()),
flag_force_sealing: true,
flag_reseal_on_txs: "all".into(),
flag_reseal_min_period: 4000u64,
flag_work_queue_size: 20usize,
flag_tx_gas_limit: Some("6283184".into()),
flag_relay_set: "cheap".into(),
flag_usd_per_tx: "0".into(),
flag_usd_per_eth: "auto".into(),
flag_price_update_period: "hourly".into(),
flag_gas_floor_target: "4700000".into(),
flag_gas_cap: "6283184".into(),
flag_extra_data: Some("Parity".into()),
flag_tx_queue_size: 1024usize,
flag_remove_solved: false,
flag_notify_work: Some("http://localhost:3001".into()),
// -- Footprint Options
flag_tracing: "auto".into(),
flag_pruning: "auto".into(),
flag_cache_size_db: 64u32,
flag_cache_size_blocks: 8u32,
flag_cache_size_queue: 50u32,
flag_cache_size: Some(128),
flag_fast_and_loose: false,
flag_db_compaction: "ssd".into(),
flag_fat_db: false,
// -- Import/Export Options
flag_from: "1".into(),
flag_to: "latest".into(),
flag_format: None,
// -- Snapshot Options
flag_at: "latest".into(),
flag_no_periodic_snapshot: false,
// -- Virtual Machine Options
flag_jitvm: false,
// -- Legacy Options
flag_geth: false,
flag_testnet: false,
flag_import_geth_keys: false,
flag_datadir: None,
flag_networkid: None,
flag_peers: None,
flag_nodekey: None,
flag_nodiscover: false,
flag_jsonrpc: false,
flag_jsonrpc_off: false,
flag_webapp: false,
flag_dapps_off: false,
flag_rpc: false,
flag_rpcaddr: None,
flag_rpcport: None,
flag_rpcapi: None,
flag_rpccorsdomain: None,
flag_ipcdisable: false,
flag_ipc_off: false,
flag_ipcapi: None,
flag_ipcpath: None,
flag_gasprice: None,
flag_etherbase: None,
flag_extradata: None,
flag_cache: None,
// -- Miscellaneous Options
flag_version: false,
flag_config: "$HOME/.parity/config.toml".into(),
flag_logging: Some("own_tx=trace".into()),
flag_log_file: Some("/var/log/parity.log".into()),
flag_no_color: false,
flag_no_config: false,
});
}
#[test]
fn should_parse_config_and_return_errors() {
let config1 = Args::parse_config(include_str!("./config.invalid1.toml"));
let config2 = Args::parse_config(include_str!("./config.invalid2.toml"));
match (config1, config2) {
(Err(ArgsError::Parsing(_)), Err(ArgsError::Decode(_))) => {},
(a, b) => {
assert!(false, "Got invalid error types: {:?}, {:?}", a, b);
}
}
}
#[test]
fn should_deserialize_toml_file() {
let config: Config = toml::decode_str(include_str!("./config.toml")).unwrap();
assert_eq!(config, Config {
parity: Some(Operating {
mode: Some("dark".into()),
mode_timeout: Some(15u64),
mode_alarm: Some(10u64),
chain: Some("./chain.json".into()),
db_path: None,
keys_path: None,
identity: None,
}),
account: Some(Account {
unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]),
password: Some(vec!["passwdfile path".into()]),
keys_iterations: None,
}),
signer: Some(Signer {
force: None,
disable: Some(true),
port: None,
interface: None,
path: None,
}),
network: Some(Network {
disable: Some(false),
port: None,
min_peers: Some(10),
max_peers: Some(20),
nat: Some("any".into()),
id: None,
bootnodes: None,
discovery: Some(true),
node_key: None,
reserved_peers: Some("./path/to/reserved_peers".into()),
reserved_only: Some(true),
}),
rpc: Some(Rpc {
disable: Some(true),
port: Some(8180),
interface: None,
cors: None,
apis: None,
hosts: None,
}),
ipc: Some(Ipc {
disable: None,
path: None,
apis: Some(vec!["rpc".into(), "eth".into()]),
}),
dapps: Some(Dapps {
disable: None,
port: Some(8080),
path: None,
interface: None,
hosts: None,
user: Some("username".into()),
pass: Some("password".into())
}),
mining: Some(Mining {
author: Some("0xdeadbeefcafe0000000000000000000000000001".into()),
force_sealing: Some(true),
reseal_on_txs: Some("all".into()),
reseal_min_period: Some(4000),
work_queue_size: None,
relay_set: None,
usd_per_tx: None,
usd_per_eth: None,
price_update_period: Some("hourly".into()),
gas_floor_target: None,
gas_cap: None,
tx_queue_size: Some(2048),
tx_gas_limit: None,
extra_data: None,
remove_solved: None,
notify_work: None,
}),
footprint: Some(Footprint {
tracing: Some("on".into()),
pruning: Some("fast".into()),
fast_and_loose: None,
cache_size: None,
cache_size_db: Some(128),
cache_size_blocks: Some(16),
cache_size_queue: Some(100),
db_compaction: Some("ssd".into()),
fat_db: Some(true),
}),
snapshots: Some(Snapshots {
disable_periodic: Some(true),
}),
vm: Some(VM {
jit: Some(false),
}),
misc: Some(Misc {
logging: Some("own_tx=trace".into()),
log_file: Some("/var/log/parity.log".into()),
color: Some(true),
})
});
}
}
parity/cli/usage.rs Normal file
@@ -0,0 +1,213 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
macro_rules! otry {
($e: expr) => (
match $e {
Some(ref v) => v,
None => {
return None;
}
}
)
}
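// `otry!` is to `Option` what `try!` is to `Result`, scoped to the
// config-lookup closures used by `usage!` below: if a section of the config
// file is absent, the closure bails out with `None`. Expanded by hand, a
// lookup like `|c: &Config| otry!(c.parity).chain.clone()` behaves like this
// illustrative sketch (not part of the original file):
//
// fn chain_from(config: &Config) -> Option<String> {
//     let parity = match config.parity {
//         Some(ref v) => v,      // section present: borrow it
//         None => return None,   // whole [parity] section missing
//     };
//     parity.chain.clone()       // the field itself is also an Option
// }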
macro_rules! usage {
(
{
$(
$field_a:ident : $typ_a:ty,
)*
}
{
$(
$field:ident : $typ:ty = $default:expr, or $from_config:expr,
)*
}
) => {
use toml;
use std::{fs, io, process};
use std::io::Read;
use util::version;
use docopt::{Docopt, Error as DocoptError};
use helpers::replace_home;
use rustc_serialize;
#[derive(Debug)]
pub enum ArgsError {
Docopt(DocoptError),
Parsing(Vec<toml::ParserError>),
Decode(toml::DecodeError),
Config(String, io::Error),
}
impl ArgsError {
pub fn exit(self) -> ! {
match self {
ArgsError::Docopt(e) => e.exit(),
ArgsError::Parsing(errors) => {
println!("There is an error in the config file.");
for e in &errors {
println!("{}", e);
}
process::exit(2)
},
ArgsError::Decode(e) => {
println!("You might have supplied invalid parameters in the config file.");
println!("{}", e);
process::exit(2)
},
ArgsError::Config(path, e) => {
println!("There was an error reading your config file at: {}", path);
println!("{}", e);
process::exit(2)
}
}
}
}
impl From<DocoptError> for ArgsError {
fn from(e: DocoptError) -> Self { ArgsError::Docopt(e) }
}
impl From<toml::DecodeError> for ArgsError {
fn from(e: toml::DecodeError) -> Self { ArgsError::Decode(e) }
}
#[derive(Debug, PartialEq)]
pub struct Args {
$(
pub $field_a: $typ_a,
)*
$(
pub $field: $typ,
)*
}
impl Default for Args {
fn default() -> Self {
Args {
$(
$field_a: Default::default(),
)*
$(
$field: $default.into(),
)*
}
}
}
#[derive(Default, Debug, PartialEq, Clone, RustcDecodable)]
struct RawArgs {
$(
$field_a: $typ_a,
)*
$(
$field: Option<$typ>,
)*
}
impl Args {
pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
let raw_args = try!(RawArgs::parse(command));
// Skip loading config file if no_config flag is specified
if raw_args.flag_no_config {
return Ok(raw_args.into_args(Config::default()));
}
let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config);
let config_file = replace_home(&config_file);
let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) {
// Load config file
(Ok(mut file), _) => {
println!("Loading config file from {}", &config_file);
let mut config = String::new();
try!(file.read_to_string(&mut config).map_err(|e| ArgsError::Config(config_file, e)));
try!(Self::parse_config(&config))
},
// Don't display error in case default config cannot be loaded.
(Err(_), false) => Config::default(),
// Config set from CLI (fail with error)
(Err(e), true) => {
return Err(ArgsError::Config(config_file, e));
},
};
Ok(raw_args.into_args(config))
}
#[cfg(test)]
pub fn parse_without_config<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
Self::parse_with_config(command, Config::default())
}
#[cfg(test)]
fn parse_with_config<S: AsRef<str>>(command: &[S], config: Config) -> Result<Self, ArgsError> {
Ok(try!(RawArgs::parse(command)).into_args(config))
}
fn parse_config(config: &str) -> Result<Config, ArgsError> {
let mut value_parser = toml::Parser::new(&config);
match value_parser.parse() {
Some(value) => {
let result = rustc_serialize::Decodable::decode(&mut toml::Decoder::new(toml::Value::Table(value)));
match result {
Ok(config) => Ok(config),
Err(e) => Err(e.into()),
}
},
None => Err(ArgsError::Parsing(value_parser.errors)),
}
}
pub fn print_version() -> String {
format!(include_str!("./version.txt"), version())
}
}
impl RawArgs {
fn into_args(self, config: Config) -> Args {
let mut args = Args::default();
$(
args.$field_a = self.$field_a;
)*
$(
args.$field = self.$field.or_else(|| $from_config(&config)).unwrap_or_else(|| $default.into());
)*
args
}
pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, DocoptError> {
Docopt::new(Self::usage()).and_then(|d| d.argv(command).decode())
}
fn usage() -> String {
format!(
include_str!("./usage.txt"),
$(
$field={ let v: $typ = $default.into(); v },
// Uncomment this to debug
// "named argument never used" error
// $field = $default,
)*
)
}
}
};
}
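Everything the usage! macro generates reduces to one precedence rule, implemented in RawArgs::into_args above: a value given on the command line wins, otherwise the config file is consulted, otherwise the hard-coded default applies. A standalone sketch of that chain (illustrative names, not from the codebase):

// Illustrative only: the resolution order applied to every generated flag.
fn resolve<T>(cli: Option<T>, config_file: Option<T>, default: T) -> T {
    cli.or(config_file).unwrap_or(default)
}

// resolve(Some(8546), Some(8181), 8545) == 8546   // CLI flag wins
// resolve(None,       Some(8181), 8545) == 8181   // config file next
// resolve(None,       None,       8545) == 8545   // built-in default last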
@@ -1,23 +1,3 @@
-// Copyright 2015, 2016 Ethcore (UK) Ltd.
-// This file is part of Parity.
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-use util::version;
-use docopt::Docopt;
-pub const USAGE: &'static str = r#"
Parity. Ethereum Client.
By Wood/Paronyan/Kotewicz/Drwięga/Volf et al.
Copyright 2015, 2016 Ethcore (UK) Limited
@@ -33,7 +13,8 @@ Usage:
parity export [ <file> ] [options]
parity signer new-token [options]
parity snapshot <file> [options]
-parity restore <file> [options]
+parity restore [ <file> ] [options]
+parity tools hash <file>

Operating Options:
--mode MODE Set the operating mode. MODE can be one of:
@@ -41,134 +22,140 @@ Operating Options:
passive - Parity syncs initially, then sleeps and
wakes regularly to resync.
dark - Parity syncs only when an external interface
-is active. [default: active].
+is active. (default: {flag_mode}).
--mode-timeout SECS Specify the number of seconds before inactivity
timeout occurs when mode is dark or passive
-[default: 300].
+(default: {flag_mode_timeout}).
--mode-alarm SECS Specify the number of seconds before auto sleep
reawake timeout occurs when mode is passive
-[default: 3600].
+(default: {flag_mode_alarm}).
--chain CHAIN Specify the blockchain type. CHAIN may be either a
JSON chain specification file or olympic, frontier,
homestead, mainnet, morden, classic or testnet
-[default: homestead].
+(default: {flag_chain}).
-d --db-path PATH Specify the database & configuration directory path
-[default: $HOME/.parity].
+(default: {flag_db_path}).
--keys-path PATH Specify the path for JSON key files to be found
-[default: $HOME/.parity/keys].
+(default: {flag_keys_path}).
---identity NAME Specify your node's name.
+--identity NAME Specify your node's name. (default: {flag_identity})

Account Options:
--unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
ACCOUNTS is a comma-delimited list of addresses.
-Implies --no-signer.
+Implies --no-signer. (default: {flag_unlock:?})
--password FILE Provide a file containing a password for unlocking
-an account.
+an account. (default: {flag_password:?})
--keys-iterations NUM Specify the number of iterations to use when
deriving key from the password (bigger is more
-secure) [default: 10240].
+secure) (default: {flag_keys_iterations}).
--force-signer Enable Trusted Signer WebSocket endpoint used by
Signer UIs, even when --unlock is in use.
+(default: ${flag_force_signer})
--no-signer Disable Trusted Signer WebSocket endpoint used by
-Signer UIs.
+Signer UIs. (default: ${flag_no_signer})
--signer-port PORT Specify the port of Trusted Signer server
-[default: 8180].
+(default: {flag_signer_port}).
--signer-interface IP Specify the hostname portion of the Trusted Signer
server, IP should be an interface's IP address,
-or local [default: local].
+or local (default: {flag_signer_interface}).
--signer-path PATH Specify directory where Signer UIs tokens should
-be stored. [default: $HOME/.parity/signer]
+be stored. (default: {flag_signer_path})
--signer-no-validation Disable Origin and Host headers validation for
Trusted Signer. WARNING: INSECURE. Used only for
-development.
+development. (default: {flag_signer_no_validation})

Networking Options:
---no-network Disable p2p networking.
+--no-network Disable p2p networking. (default: {flag_no_network})
--port PORT Override the port on which the node should listen
-[default: 30303].
+(default: {flag_port}).
---min-peers NUM Try to maintain at least NUM peers [default: 25].
+--min-peers NUM Try to maintain at least NUM peers (default: {flag_min_peers}).
---max-peers NUM Allow up to that many peers [default: 50].
+--max-peers NUM Allow up to that many peers (default: {flag_max_peers}).
--nat METHOD Specify method to use for determining public
address. Must be one of: any, none, upnp,
-extip:<IP> [default: any].
+extip:<IP> (default: {flag_nat}).
--network-id INDEX Override the network identifier from the chain we
-are on.
+are on. (default: {flag_network_id:?})
--bootnodes NODES Override the bootnodes from our chain. NODES should
-be comma-delimited enodes.
+be comma-delimited enodes. (default: {flag_bootnodes:?})
---no-discovery Disable new peer discovery.
+--no-discovery Disable new peer discovery. (default: {flag_no_discovery})
--node-key KEY Specify node secret key, either as 64-character hex
-string or input to SHA3 operation.
+string or input to SHA3 operation. (default: {flag_node_key:?})
--reserved-peers FILE Provide a file containing enodes, one per line.
These nodes will always have a reserved slot on top
-of the normal maximum peers.
+of the normal maximum peers. (default: {flag_reserved_peers:?})
---reserved-only Connect only to reserved nodes.
+--reserved-only Connect only to reserved nodes. (default: {flag_reserved_only})

API and Console Options:
---no-jsonrpc Disable the JSON-RPC API server.
+--no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc})
--jsonrpc-port PORT Specify the port portion of the JSONRPC API server
-[default: 8545].
+(default: {flag_jsonrpc_port}).
--jsonrpc-interface IP Specify the hostname portion of the JSONRPC API
server, IP should be an interface's IP address, or
-all (all interfaces) or local [default: local].
+all (all interfaces) or local (default: {flag_jsonrpc_interface}).
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses.
+(default: {flag_jsonrpc_cors:?})
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC
interface. APIS is a comma-delimited list of API
names. Possible names are web3, eth, net, personal,
ethcore, ethcore_set, traces, rpc.
-[default: web3,eth,net,ethcore,personal,traces,rpc].
+(default: {flag_jsonrpc_apis}).
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it
is additional security against some attack
vectors. Special options: "all", "none",
-[default: none].
+(default: {flag_jsonrpc_hosts}).
---no-ipc Disable JSON-RPC over IPC service.
+--no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc})
--ipc-path PATH Specify custom path for JSON-RPC over IPC service
-[default: $HOME/.parity/jsonrpc.ipc].
+(default: {flag_ipc_path}).
--ipc-apis APIS Specify custom API set available via JSON-RPC over
-IPC [default: web3,eth,net,ethcore,personal,traces,rpc].
+IPC (default: {flag_ipc_apis}).
---no-dapps Disable the Dapps server (e.g. status page).
+--no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps})
--dapps-port PORT Specify the port portion of the Dapps server
-[default: 8080].
+(default: {flag_dapps_port}).
--dapps-interface IP Specify the hostname portion of the Dapps
server, IP should be an interface's IP address,
-or local [default: local].
+or local (default: {flag_dapps_interface}).
--dapps-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it
is additional security against some attack
vectors. Special options: "all", "none",
-[default: none].
+(default: {flag_dapps_hosts}).
--dapps-user USERNAME Specify username for Dapps server. It will be
used in HTTP Basic Authentication Scheme.
If --dapps-pass is not specified you will be
-asked for password on startup.
+asked for password on startup. (default: {flag_dapps_user:?})
--dapps-pass PASSWORD Specify password for Dapps server. Use only in
-conjunction with --dapps-user.
+conjunction with --dapps-user. (default: {flag_dapps_pass:?})
--dapps-path PATH Specify directory where dapps should be installed.
-[default: $HOME/.parity/dapps]
+(default: {flag_dapps_path})

Sealing/Mining Options:
--author ADDRESS Specify the block author (aka "coinbase") address
for sending block rewards from sealed blocks.
NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.
+(default: {flag_author:?})
--force-sealing Force the node to author new blocks as if it were
always sealing/mining.
+(default: {flag_force_sealing})
--reseal-on-txs SET Specify which transactions should force the node
to reseal a block. SET is one of:
none - never reseal on new transactions;
own - reseal only on a new local transaction;
ext - reseal only on a new external transaction;
-all - reseal on all new transactions [default: own].
+all - reseal on all new transactions
+(default: {flag_reseal_on_txs}).
--reseal-min-period MS Specify the minimum time between reseals from
incoming transactions. MS is time measured in
-milliseconds [default: 2000].
+milliseconds (default: {flag_reseal_min_period}).
--work-queue-size ITEMS Specify the number of historical work packages
which are kept cached lest a solution is found for
them later. High values take more memory but result
-in fewer unusable solutions [default: 20].
+in fewer unusable solutions (default: {flag_work_queue_size}).
--tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas
a single transaction may have for it to be mined.
+(default: {flag_tx_gas_limit:?})
--relay-set SET Set of transactions to relay. SET may be:
cheap - Relay any transaction in the queue (this
may include invalid transactions);
@@ -176,78 +163,81 @@ Sealing/Mining Options:
guarantees we don't relay invalid transactions, but
means we relay nothing if not mining);
lenient - Same as strict when mining, and cheap
-when not [default: cheap].
+when not (default: {flag_relay_set}).
--usd-per-tx USD Amount of USD to be paid for a basic transaction
-[default: 0]. The minimum gas price is set
+(default: {flag_usd_per_tx}). The minimum gas price is set
accordingly.
--usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an
amount in USD, a web service or 'auto' to use each
web service in turn and fallback on the last known
-good value [default: auto].
+good value (default: {flag_usd_per_eth}).
--price-update-period T T will be allowed to pass between each gas price
update. T may be daily, hourly, a number of seconds,
or a time string of the form "2 days", "30 minutes"
-etc. [default: hourly].
+etc. (default: {flag_price_update_period}).
--gas-floor-target GAS Amount of gas per block to target when sealing a new
-block [default: 4700000].
+block (default: {flag_gas_floor_target}).
--gas-cap GAS A cap on how large we will raise the gas limit per
-block due to transaction volume [default: 6283184].
+block due to transaction volume (default: {flag_gas_cap}).
--extra-data STRING Specify a custom extra-data for authored blocks, no
-more than 32 characters.
+more than 32 characters. (default: {flag_extra_data:?})
--tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting
-to be included in next block) [default: 1024].
+to be included in next block) (default: {flag_tx_queue_size}).
--remove-solved Move solved blocks from the work package queue
instead of cloning them. This gives a slightly
faster import speed, but means that extra solutions
submitted for the same work package will go unused.
+(default: {flag_remove_solved})
--notify-work URLS URLs to which work package notifications are pushed.
URLS should be a comma-delimited list of HTTP URLs.
+(default: {flag_notify_work:?})

Footprint Options:
--tracing BOOL Indicates if full transaction tracing should be
enabled. Works only if client had been fully synced
with tracing enabled. BOOL may be one of auto, on,
off. auto uses last used value of this option (off
-if it does not exist) [default: auto].
+if it does not exist) (default: {flag_tracing}).
--pruning METHOD Configure pruning of the state/storage trie. METHOD
may be one of auto, archive, fast:
archive - keep all state trie data. No pruning.
fast - maintain journal overlay. Fast but 50MB used.
auto - use the method most recently synced or
-default to fast if none synced [default: auto].
+default to fast if none synced (default: {flag_pruning}).
---cache-size-db MB Override database cache size [default: 64].
+--cache-size-db MB Override database cache size (default: {flag_cache_size_db}).
--cache-size-blocks MB Specify the preferred size of the blockchain cache in
-megabytes [default: 8].
+megabytes (default: {flag_cache_size_blocks}).
--cache-size-queue MB Specify the maximum size of memory to use for block
-queue [default: 50].
+queue (default: {flag_cache_size_queue}).
--cache-size MB Set total amount of discretionary memory to use for
the entire system, overrides other cache and queue
-options.
+options. (default: {flag_cache_size:?})
--fast-and-loose Disables DB WAL, which gives a significant speed up
-but means an unclean exit is unrecoverable.
+but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
--db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs;
-hdd - suitable for slow HDDs [default: ssd].
+hdd - suitable for slow HDDs (default: {flag_db_compaction}).
---fat-db Fat database.
+--fat-db Fat database. (default: {flag_fat_db})

Import/Export Options:
--from BLOCK Export from block BLOCK, which may be an index or
-hash [default: 1].
+hash (default: {flag_from}).
--to BLOCK Export to (including) block BLOCK, which may be an
-index, hash or 'latest' [default: latest].
+index, hash or 'latest' (default: {flag_to}).
--format FORMAT For import/export in given format. FORMAT must be
one of 'hex' and 'binary'.
+(default: {flag_format:?} = Import: auto, Export: binary)

Snapshot Options:
--at BLOCK Take a snapshot at the given block, which may be an
index, hash, or 'latest'. Note that taking snapshots at
non-recent blocks will only work with --pruning archive
-[default: latest]
+(default: {flag_at})
--no-periodic-snapshot Disable automated snapshots which usually occur once
-every 10000 blocks.
+every 10000 blocks. (default: {flag_no_periodic_snapshot})

Virtual Machine Options:
---jitvm Enable the JIT VM.
+--jitvm Enable the JIT VM. (default: {flag_jitvm})

Legacy Options:
--geth Run in Geth-compatibility mode. Sets the IPC path
@@ -284,156 +274,13 @@ Legacy Options:
--cache MB Equivalent to --cache-size MB.

Miscellaneous Options:
+-c --config CONFIG Specify a filename containing a configuration file.
+(default: {flag_config})
-l --logging LOGGING Specify the logging level. Must conform to the same
-format as RUST_LOG.
+format as RUST_LOG. (default: {flag_logging:?})
--log-file FILENAME Specify a filename into which logging should be
-directed.
+directed. (default: {flag_log_file:?})
---no-color Don't use terminal color codes in output.
+--no-config Don't load a configuration file.
+--no-color Don't use terminal color codes in output. (default: {flag_no_color})
-v --version Show information about version.
-h --help Show this screen.
-"#;
-#[derive(Debug, PartialEq, RustcDecodable)]
-pub struct Args {
-	pub cmd_daemon: bool,
-	pub cmd_account: bool,
-	pub cmd_wallet: bool,
-	pub cmd_new: bool,
-	pub cmd_list: bool,
-	pub cmd_export: bool,
-	pub cmd_import: bool,
-	pub cmd_signer: bool,
-	pub cmd_new_token: bool,
-	pub cmd_snapshot: bool,
-	pub cmd_restore: bool,
-	pub cmd_ui: bool,
-	pub arg_pid_file: String,
-	pub arg_file: Option<String>,
-	pub arg_path: Vec<String>,
-	pub flag_mode: String,
-	pub flag_mode_timeout: u64,
-	pub flag_mode_alarm: u64,
-	pub flag_chain: String,
-	pub flag_db_path: String,
-	pub flag_identity: String,
-	pub flag_unlock: Option<String>,
-	pub flag_password: Vec<String>,
-	pub flag_keys_path: String,
-	pub flag_keys_iterations: u32,
-	pub flag_import_geth_keys: bool,
-	pub flag_bootnodes: Option<String>,
-	pub flag_network_id: Option<String>,
-	pub flag_pruning: String,
-	pub flag_tracing: String,
-	pub flag_port: u16,
-	pub flag_min_peers: u16,
-	pub flag_max_peers: u16,
-	pub flag_no_discovery: bool,
-	pub flag_nat: String,
-	pub flag_node_key: Option<String>,
-	pub flag_reserved_peers: Option<String>,
-	pub flag_reserved_only: bool,
-	pub flag_cache_size_db: u32,
-	pub flag_cache_size_blocks: u32,
-	pub flag_cache_size_queue: u32,
-	pub flag_cache_size: Option<u32>,
-	pub flag_cache: Option<u32>,
-	pub flag_fast_and_loose: bool,
-	pub flag_no_jsonrpc: bool,
-	pub flag_jsonrpc_interface: String,
-	pub flag_jsonrpc_port: u16,
-	pub flag_jsonrpc_cors: Option<String>,
-	pub flag_jsonrpc_hosts: String,
-	pub flag_jsonrpc_apis: String,
-	pub flag_no_ipc: bool,
-	pub flag_ipc_path: String,
-	pub flag_ipc_apis: String,
-	pub flag_no_dapps: bool,
-	pub flag_dapps_port: u16,
-	pub flag_dapps_interface: String,
-	pub flag_dapps_hosts: String,
-	pub flag_dapps_user: Option<String>,
-	pub flag_dapps_pass: Option<String>,
-	pub flag_dapps_path: String,
-	pub flag_force_signer: bool,
-	pub flag_no_signer: bool,
-	pub flag_signer_port: u16,
-	pub flag_signer_interface: String,
-	pub flag_signer_path: String,
-	pub flag_signer_no_validation: bool,
-	pub flag_force_sealing: bool,
-	pub flag_reseal_on_txs: String,
-	pub flag_reseal_min_period: u64,
-	pub flag_work_queue_size: usize,
-	pub flag_remove_solved: bool,
-	pub flag_tx_gas_limit: Option<String>,
-	pub flag_relay_set: String,
-	pub flag_author: Option<String>,
-	pub flag_usd_per_tx: String,
-	pub flag_usd_per_eth: String,
-	pub flag_price_update_period: String,
-	pub flag_gas_floor_target: String,
-	pub flag_gas_cap: String,
-	pub flag_extra_data: Option<String>,
-	pub flag_tx_queue_size: usize,
-	pub flag_notify_work: Option<String>,
-	pub flag_logging: Option<String>,
-	pub flag_version: bool,
-	pub flag_from: String,
-	pub flag_to: String,
-	pub flag_at: String,
-	pub flag_no_periodic_snapshot: bool,
-	pub flag_format: Option<String>,
-	pub flag_jitvm: bool,
-	pub flag_log_file: Option<String>,
-	pub flag_no_color: bool,
-	pub flag_no_network: bool,
-	// legacy...
-	pub flag_geth: bool,
-	pub flag_nodekey: Option<String>,
-	pub flag_nodiscover: bool,
-	pub flag_peers: Option<u16>,
-	pub flag_datadir: Option<String>,
-	pub flag_extradata: Option<String>,
-	pub flag_etherbase: Option<String>,
-	pub flag_gasprice: Option<String>,
-	pub flag_jsonrpc: bool,
-	pub flag_webapp: bool,
-	pub flag_rpc: bool,
-	pub flag_rpcaddr: Option<String>,
-	pub flag_rpcport: Option<u16>,
-	pub flag_rpccorsdomain: Option<String>,
-	pub flag_rpcapi: Option<String>,
-	pub flag_testnet: bool,
-	pub flag_networkid: Option<String>,
-	pub flag_ipcdisable: bool,
-	pub flag_ipc_off: bool,
-	pub flag_jsonrpc_off: bool,
-	pub flag_dapps_off: bool,
-	pub flag_ipcpath: Option<String>,
-	pub flag_ipcapi: Option<String>,
-	pub flag_db_compaction: String,
-	pub flag_fat_db: bool,
-}
-
-impl Default for Args {
-	fn default() -> Self {
-		Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap()
-	}
-}
-
-pub fn print_version() -> String {
-	format!("\
-Parity
-  version {}
-Copyright 2015, 2016 Ethcore (UK) Limited
-License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
-This is free software: you are free to change and redistribute it.
-There is NO WARRANTY, to the extent permitted by law.
-By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
-", version())
-}
parity/cli/version.txt Normal file
@@ -0,0 +1,9 @@
Parity
version {}
Copyright 2015, 2016 Ethcore (UK) Limited
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
By Wood/Paronyan/Kotewicz/Drwięga/Volf.
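The new version.txt mirrors the template embedded in print_version above, with `{}` as the placeholder for the version string, presumably so it can be pulled in at compile time and the two cannot drift apart. A sketch of that wiring (whether cli/mod.rs uses include_str! exactly like this is an assumption):

// Sketch: include_str! expands to a string literal at compile time, so
// format! can substitute the {} placeholder in the template directly.
fn print_version_sketch(version: &str) -> String {
    format!(include_str!("version.txt"), version)
}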


@@ -19,8 +19,7 @@ use std::io::Read;
 use std::net::SocketAddr;
 use std::path::PathBuf;
 use std::cmp::max;
-use cli::{USAGE, Args};
-use docopt::{Docopt, Error as DocoptError};
+use cli::{Args, ArgsError};
 use util::{Hashable, U256, Uint, Bytes, version_data, Secret, Address};
 use util::log::Colour;
 use ethsync::{NetworkConfiguration, is_valid_node_url};
@@ -52,6 +51,7 @@ pub enum Cmd {
 	Blockchain(BlockchainCmd),
 	SignerToken(String),
 	Snapshot(SnapshotCommand),
+	Hash(Option<String>),
 }
 
 #[derive(Debug, PartialEq)]
@@ -60,8 +60,8 @@ pub struct Configuration {
 }
 
 impl Configuration {
-	pub fn parse<S, I>(command: I) -> Result<Self, DocoptError> where I: IntoIterator<Item=S>, S: AsRef<str> {
-		let args = try!(Docopt::new(USAGE).and_then(|d| d.argv(command).decode()));
+	pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
+		let args = try!(Args::parse(command));
 
 		let config = Configuration {
 			args: args,
@@ -95,8 +95,10 @@ impl Configuration {
 		let cmd = if self.args.flag_version {
 			Cmd::Version
-		} else if self.args.cmd_signer {
+		} else if self.args.cmd_signer && self.args.cmd_new_token {
 			Cmd::SignerToken(dirs.signer)
+		} else if self.args.cmd_tools && self.args.cmd_hash {
+			Cmd::Hash(self.args.arg_file)
 		} else if self.args.cmd_account {
 			let account_cmd = if self.args.cmd_new {
 				let new_acc = NewAccount {
@@ -628,8 +630,7 @@ impl Configuration {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use cli::USAGE;
-	use docopt::Docopt;
+	use cli::Args;
 	use ethcore_rpc::NetworkSettings;
 	use ethcore::client::{VMType, BlockID};
 	use helpers::{replace_home, default_network_config};
@@ -647,21 +648,21 @@ mod tests {
 	fn parse(args: &[&str]) -> Configuration {
 		Configuration {
-			args: Docopt::new(USAGE).unwrap().argv(args).decode().unwrap(),
+			args: Args::parse_without_config(args).unwrap(),
 		}
 	}
 
 	#[test]
 	fn test_command_version() {
 		let args = vec!["parity", "--version"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Version);
 	}
 
 	#[test]
 	fn test_command_account_new() {
 		let args = vec!["parity", "account", "new"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::New(NewAccount {
 			iterations: 10240,
 			path: replace_home("$HOME/.parity/keys"),
@@ -672,7 +673,7 @@ mod tests {
 	#[test]
 	fn test_command_account_list() {
 		let args = vec!["parity", "account", "list"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Account(
 			AccountCmd::List(replace_home("$HOME/.parity/keys")))
 		);
@@ -681,7 +682,7 @@ mod tests {
 	#[test]
 	fn test_command_account_import() {
 		let args = vec!["parity", "account", "import", "my_dir", "another_dir"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::Import(ImportAccounts {
 			from: vec!["my_dir".into(), "another_dir".into()],
 			to: replace_home("$HOME/.parity/keys"),
@@ -691,7 +692,7 @@ mod tests {
 	#[test]
 	fn test_command_wallet_import() {
 		let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::ImportPresaleWallet(ImportWallet {
 			iterations: 10240,
 			path: replace_home("$HOME/.parity/keys"),
@@ -703,7 +704,7 @@ mod tests {
 	#[test]
 	fn test_command_blockchain_import() {
 		let args = vec!["parity", "import", "blockchain.json"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain {
 			spec: Default::default(),
 			logger_config: Default::default(),
@@ -723,7 +724,7 @@ mod tests {
 	#[test]
 	fn test_command_blockchain_export() {
 		let args = vec!["parity", "export", "blockchain.json"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain {
 			spec: Default::default(),
 			logger_config: Default::default(),
@@ -744,7 +745,7 @@ mod tests {
 	#[test]
 	fn test_command_blockchain_export_with_custom_format() {
 		let args = vec!["parity", "export", "--format", "hex", "blockchain.json"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain {
 			spec: Default::default(),
 			logger_config: Default::default(),
@@ -765,7 +766,7 @@ mod tests {
 	#[test]
 	fn test_command_signer_new_token() {
 		let args = vec!["parity", "signer", "new-token"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		let expected = replace_home("$HOME/.parity/signer");
 		assert_eq!(conf.into_command().unwrap(), Cmd::SignerToken(expected));
 	}
@@ -773,7 +774,7 @@ mod tests {
 	#[test]
 	fn test_run_cmd() {
 		let args = vec!["parity"];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = parse(&args);
 		assert_eq!(conf.into_command().unwrap(), Cmd::Run(RunCmd {
 			cache_config: Default::default(),
 			dirs: Default::default(),
@@ -962,7 +963,7 @@ mod tests {
 		let filename = temp.as_str().to_owned() + "/peers";
 		File::create(filename.clone()).unwrap().write_all(b" \n\t\n").unwrap();
 		let args = vec!["parity", "--reserved-peers", &filename];
-		let conf = Configuration::parse(args).unwrap();
+		let conf = Configuration::parse(&args).unwrap();
 		assert!(conf.init_reserved_nodes().is_ok());
 	}
 }
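Taken together, these hunks replace raw docopt decoding with the new Args front end: Configuration::parse now takes a plain slice and returns ArgsError instead of DocoptError, and the tests switch to Args::parse_without_config, whose name suggests that Args::parse additionally merges settings from a config file. A sketch of the new entry point as it might be called (Configuration, Cmd and ArgsError are the crate-internal types from the diff above, so this will not compile on its own):

// Sketch only; assumes parity's internal types shown above.
fn start_sketch() {
	let argv: Vec<String> = std::env::args().collect();
	match Configuration::parse(&argv) {
		// into_command() maps the parsed flags onto a Cmd variant
		// (Run, Account, Blockchain, SignerToken, Snapshot, Hash, ...).
		Ok(conf) => match conf.into_command() {
			Ok(cmd) => { /* dispatch on cmd */ },
			Err(e) => println!("{}", e),
		},
		// ArgsError replaces DocoptError and, judging by parse_without_config
		// in the tests, also covers config-file problems.
		Err(err) => println!("{}", err),
	}
}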


@@ -52,32 +52,13 @@ impl Directories {
 		Ok(())
 	}
 
-	/// Get the chain's root path.
-	pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-		let mut dir = Path::new(&self.db).to_path_buf();
-		dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
-		dir
-	}
-
-	/// Get the root path for database
-	pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
-		let mut dir = self.chain_path(genesis_hash, fork_name);
-		dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
-		dir
-	}
-
-	/// Get the path for the databases given the genesis_hash and information on the databases.
-	pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
-		let mut dir = self.db_version_path(genesis_hash, fork_name, pruning);
-		dir.push("db");
-		dir
-	}
-
-	/// Get the path for the snapshot directory given the genesis hash and fork name.
-	pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-		let mut dir = self.chain_path(genesis_hash, fork_name);
-		dir.push("snapshot");
-		dir
+	/// Database paths.
+	pub fn database(&self, genesis_hash: H256, fork_name: Option<String>) -> DatabaseDirectories {
+		DatabaseDirectories {
+			path: self.db.clone(),
+			genesis_hash: genesis_hash,
+			fork_name: fork_name,
+		}
 	}
 
 	/// Get the ipc sockets path
@@ -88,6 +69,49 @@
 	}
 }
 
+#[derive(Debug, PartialEq)]
+pub struct DatabaseDirectories {
+	pub path: String,
+	pub genesis_hash: H256,
+	pub fork_name: Option<String>,
+}
+
+impl DatabaseDirectories {
+	fn fork_path(&self) -> PathBuf {
+		let mut dir = Path::new(&self.path).to_path_buf();
+		dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
+		dir
+	}
+
+	/// Get the root path for database
+	pub fn version_path(&self, pruning: Algorithm) -> PathBuf {
+		let mut dir = self.fork_path();
+		dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
+		dir
+	}
+
+	/// Get the path for the databases given the genesis_hash and information on the databases.
+	pub fn client_path(&self, pruning: Algorithm) -> PathBuf {
+		let mut dir = self.version_path(pruning);
+		dir.push("db");
+		dir
+	}
+
+	/// Get user defaults path
+	pub fn user_defaults_path(&self) -> PathBuf {
+		let mut dir = self.fork_path();
+		dir.push("user_defaults");
+		dir
+	}
+
+	/// Get the path for the snapshot directory given the genesis hash and fork name.
+	pub fn snapshot_path(&self) -> PathBuf {
+		let mut dir = self.fork_path();
+		dir.push("snapshot");
+		dir
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::Directories;
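The refactor moves every per-chain path derivation behind DatabaseDirectories, so genesis_hash and fork_name are captured once instead of being threaded through each call. A self-contained sketch of the resulting directory layout, with plain strings standing in for the util types (H256/H64, Algorithm); the hash and version tag below are illustrative values, not taken from the real code:

// Simplified, runnable model of the layout DatabaseDirectories produces.
use std::path::PathBuf;

struct DbDirs {
    path: String,
    genesis_hash: String,      // stands in for H64::from(genesis_hash)
    fork_name: Option<String>,
}

impl DbDirs {
    fn fork_path(&self) -> PathBuf {
        let mut dir = PathBuf::from(&self.path);
        let suffix = self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default();
        dir.push(format!("{}{}", self.genesis_hash, suffix));
        dir
    }

    fn client_path(&self, version_tag: &str) -> PathBuf {
        // The real code derives version_tag from LEGACY_CLIENT_DB_VER_STR
        // and the pruning algorithm's internal name.
        let mut dir = self.fork_path();
        dir.push(version_tag);
        dir.push("db");
        dir
    }
}

fn main() {
    let dirs = DbDirs {
        path: "/home/user/.parity".into(),
        genesis_hash: "d4e56740f876aef8".into(),
        fork_name: Some("homestead".into()),
    };
    // Prints: /home/user/.parity/d4e56740f876aef8-homestead/v5.3-sec-archive/db
    println!("{}", dirs.client_path("v5.3-sec-archive").display());
}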


@@ -19,13 +19,12 @@ use std::io::{Write, Read, BufReader, BufRead};
 use std::time::Duration;
 use std::path::Path;
 use std::fs::File;
-use util::{clean_0x, U256, Uint, Address, path, H256, CompactionProfile};
+use util::{clean_0x, U256, Uint, Address, path, CompactionProfile};
 use util::journaldb::Algorithm;
-use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig};
+use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig};
 use ethcore::miner::PendingSet;
 use cache::CacheConfig;
-use dir::Directories;
-use params::Pruning;
+use dir::DatabaseDirectories;
 use upgrade::upgrade;
 use migration::migrate;
 use ethsync::is_valid_node_url;
@@ -190,16 +189,13 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration {
 #[cfg_attr(feature = "dev", allow(too_many_arguments))]
 pub fn to_client_config(
 	cache_config: &CacheConfig,
-	dirs: &Directories,
-	genesis_hash: H256,
 	mode: Mode,
-	tracing: Switch,
-	pruning: Pruning,
+	tracing: bool,
 	compaction: DatabaseCompactionProfile,
 	wal: bool,
 	vm_type: VMType,
 	name: String,
-	fork_name: Option<&String>,
+	pruning: Algorithm,
 ) -> ClientConfig {
 	let mut client_config = ClientConfig::default();
@@ -221,7 +217,7 @@ pub fn to_client_config(
 	client_config.mode = mode;
 	client_config.tracing.enabled = tracing;
-	client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name);
+	client_config.pruning = pruning;
 	client_config.db_compaction = compaction;
 	client_config.db_wal = wal;
 	client_config.vm_type = vm_type;
@@ -230,14 +226,12 @@
 }
 
 pub fn execute_upgrades(
-	dirs: &Directories,
-	genesis_hash: H256,
-	fork_name: Option<&String>,
+	dirs: &DatabaseDirectories,
 	pruning: Algorithm,
 	compaction_profile: CompactionProfile
 ) -> Result<(), String> {
-	match upgrade(Some(&dirs.db)) {
+	match upgrade(Some(&dirs.path)) {
 		Ok(upgrades_applied) if upgrades_applied > 0 => {
 			debug!("Executed {} upgrade scripts - ok", upgrades_applied);
 		},
@@ -247,7 +241,7 @@ pub fn execute_upgrades(
 		_ => {},
 	}
 
-	let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning);
+	let client_path = dirs.version_path(pruning);
 	migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
 }
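With these changes to_client_config no longer resolves paths or pruning itself: it receives a concrete Algorithm and a plain bool for tracing, while execute_upgrades works from the DatabaseDirectories bundle. A call-site sketch under those assumptions (crate-internal types from the diffs above; the variable names are hypothetical and this is not standalone code):

// Sketch of a caller after the refactor.
let db_dirs = dirs.database(genesis_hash, fork_name);   // from dir.rs, above
try!(execute_upgrades(&db_dirs, algorithm, compaction_profile));
let client_config = to_client_config(
	&cache_config, mode, tracing, compaction, wal, vm_type, name, algorithm,
);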


@@ -41,7 +41,9 @@ pub struct Informant {
 	skipped: AtomicUsize,
 }
 
-trait MillisecondDuration {
+/// Something that can be converted to milliseconds.
+pub trait MillisecondDuration {
+	/// Get the value in milliseconds.
 	fn as_milliseconds(&self) -> u64;
 }
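The trait is promoted from private to public and gains doc comments, presumably so other modules can reuse it. Its implementation is not part of this hunk; a plausible, runnable impl for std::time::Duration would look like this (an assumption, not the code from informant.rs):

use std::time::Duration;

/// Something that can be converted to milliseconds.
pub trait MillisecondDuration {
	/// Get the value in milliseconds.
	fn as_milliseconds(&self) -> u64;
}

// Assumed implementation: whole seconds plus the millisecond part of the
// sub-second nanoseconds.
impl MillisecondDuration for Duration {
	fn as_milliseconds(&self) -> u64 {
		self.as_secs() * 1000 + (self.subsec_nanos() / 1_000_000) as u64
	}
}

fn main() {
	let d = Duration::new(2, 500_000_000); // 2.5 seconds
	assert_eq!(d.as_milliseconds(), 2500);
}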

Some files were not shown because too many files have changed in this diff.