Merge remote-tracking branch 'parity/master'

keorn 2016-09-19 14:33:59 +02:00
commit 551b5d7b6c
65 changed files with 1814 additions and 550 deletions


@ -21,13 +21,15 @@ linux-stable:
- export - export
- cargo build --release --verbose - cargo build --release --verbose
- strip target/release/parity - strip target/release/parity
- mkdir -p x86_64-unknown-linux-gnu/stable
- cp target/release/parity x86_64-unknown-linux-gnu/stable/parity
tags: tags:
- rust - rust
- rust-stable - rust-stable
artifacts: artifacts:
paths: paths:
- target/release/parity - x86_64-unknown-linux-gnu/stable/parity
name: "${CI_BUILD_NAME}_parity" name: "stable-x86_64-unknown-linux-gnu_parity"
linux-beta: linux-beta:
stage: build stage: build
image: ethcore/rust:beta image: ethcore/rust:beta
@ -40,14 +42,16 @@ linux-beta:
- export - export
- cargo build --release --verbose - cargo build --release --verbose
- strip target/release/parity - strip target/release/parity
- cp target/release/parity parity - mkdir -p x86_64-unknown-linux-gnu/beta
- cp target/release/parity x86_64-unknown-linux-gnu/beta/parity
tags: tags:
- rust - rust
- rust-beta - rust-beta
artifacts: artifacts:
paths: paths:
- target/release/parity - x86_64-unknown-linux-gnu/beta/parity
name: "${CI_BUILD_NAME}_parity" name: "beta-x86_64-unknown-linux-gnu_parity"
allow_failure: true
linux-nightly: linux-nightly:
stage: build stage: build
image: ethcore/rust:nightly image: ethcore/rust:nightly
@ -59,13 +63,15 @@ linux-nightly:
script: script:
- cargo build --release --verbose - cargo build --release --verbose
- strip target/release/parity - strip target/release/parity
- mkdir -p x86_64-unknown-linux-gnu/nightly
- cp target/release/parity x86_64-unknown-linux-gnu/nigthly/parity
tags: tags:
- rust - rust
- rust-nightly - rust-nightly
artifacts: artifacts:
paths: paths:
- target/release/parity - x86_64-unknown-linux-gnu/nigthly/parity
name: "${CI_BUILD_NAME}_parity" name: "nigthly-x86_64-unknown-linux-gnu_parity"
allow_failure: true allow_failure: true
linux-centos: linux-centos:
stage: build stage: build
@ -80,13 +86,15 @@ linux-centos:
- export CC="gcc" - export CC="gcc"
- cargo build --release --verbose - cargo build --release --verbose
- strip target/release/parity - strip target/release/parity
- mkdir -p x86_64-unknown-linux-gnu/centos
- cp target/release/parity x86_64-unknown-linux-gnu/centos/parity
tags: tags:
- rust - rust
- rust-centos - rust-centos
artifacts: artifacts:
paths: paths:
- target/release/parity - x86_64-unknown-linux-gnu/centos/parity
name: "${CI_BUILD_NAME}_parity" name: "centos-x86_64-unknown-linux-gnu_parity"
linux-armv7: linux-armv7:
stage: build stage: build
image: ethcore/rust-arm:latest image: ethcore/rust-arm:latest
@ -97,7 +105,6 @@ linux-armv7:
- stable - stable
script: script:
- export - export
- export CXX="arm-linux-gnueabihf-g++"
- rm -rf .cargo - rm -rf .cargo
- mkdir -p .cargo - mkdir -p .cargo
- echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
@ -105,13 +112,15 @@ linux-armv7:
- cat .cargo/config - cat .cargo/config
- cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
- arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
- mkdir -p armv7_unknown_linux_gnueabihf
- cp target/release/party armv7_unknown_linux_gnueabihf/parity
tags: tags:
- rust - rust
- rust-arm - rust-arm
artifacts: artifacts:
paths: paths:
- target/armv7-unknown-linux-gnueabihf/release/parity - armv7-unknown-linux-gnueabihf/parity
name: "${CI_BUILD_NAME}_parity" name: "armv7_unknown_linux_gnueabihf_parity"
allow_failure: true allow_failure: true
linux-arm: linux-arm:
stage: build stage: build
@ -123,20 +132,22 @@ linux-arm:
- stable - stable
script: script:
- export - export
#- rm -rf .cargo - rm -rf .cargo
#- mkdir -p .cargo - mkdir -p .cargo
#- echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
#- echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
#- cat .cargo/config - cat .cargo/config
- cargo build --target arm-unknown-linux-gnueabihf --release --verbose - cargo build --target arm-unknown-linux-gnueabihf --release --verbose
- arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
- mkdir -p arm-unknown-linux-gnueabihf
- cp target/release/parity arm-unknown-linux-gnueabihf/parity
tags: tags:
- rust - rust
- rust-arm - rust-arm
artifacts: artifacts:
paths: paths:
- target/arm-unknown-linux-gnueabihf/release/parity - arm-unknown-linux-gnueabihf/parity
name: "${CI_BUILD_NAME}_parity" name: "arm-unknown-linux-gnueabihf_parity"
allow_failure: true allow_failure: true
linux-armv6: linux-armv6:
stage: build stage: build
@ -148,20 +159,22 @@ linux-armv6:
- stable - stable
script: script:
- export - export
#- rm -rf .cargo - rm -rf .cargo
#- mkdir -p .cargo - mkdir -p .cargo
#- echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
#- echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config
#- cat .cargo/config - cat .cargo/config
- cargo build --target arm-unknown-linux-gnueabi --release --verbose - cargo build --target arm-unknown-linux-gnueabi --release --verbose
- arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
- mkdir -p arm-unknown-linux-gnueabi
- cp target/release/parity arm-unknown-linux-gnueabi/parity
tags: tags:
- rust - rust
- rust-arm - rust-arm
artifacts: artifacts:
paths: paths:
- target/arm-unknown-linux-gnueabi/release/parity - arm-unknown-linux-gnueabi/parity
name: "${CI_BUILD_NAME}_parity" name: "arm-unknown-linux-gnueabi_parity"
allow_failure: true allow_failure: true
linux-aarch64: linux-aarch64:
stage: build stage: build
@ -173,20 +186,22 @@ linux-aarch64:
- stable - stable
script: script:
- export - export
#- rm -rf .cargo - rm -rf .cargo
#- mkdir -p .cargo - mkdir -p .cargo
#- echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
#- echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config
#- cat .cargo/config - cat .cargo/config
- cargo build --target aarch64-unknown-linux-gnu --release --verbose - cargo build --target aarch64-unknown-linux-gnu --release --verbose
- aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
- mkdir -p aarch64-unknown-linux-gnu
- cp target/release/parity aarch64-unknown-linux-gnu/parity
tags: tags:
- rust - rust
- rust-arm - rust-arm
artifacts: artifacts:
paths: paths:
- target/aarch64-unknown-linux-gnu/release/parity - aarch64-unknown-linux-gnu/parity
name: "${CI_BUILD_NAME}_parity" name: "aarch64-unknown-linux-gnu_parity"
allow_failure: true allow_failure: true
darwin: darwin:
stage: build stage: build
@ -197,12 +212,14 @@ darwin:
- stable - stable
script: script:
- cargo build --release --verbose - cargo build --release --verbose
- mkdir -p x86_64-apple-darwin
- cp target/release/parity x86_64-apple-darwin/parity
tags: tags:
- osx - osx
artifacts: artifacts:
paths: paths:
- target/release/parity - x86_64-apple-darwin/parity
name: "${CI_BUILD_NAME}_parity" name: "x86_64-apple-darwin_parity"
windows: windows:
stage: build stage: build
only: only:
@ -222,7 +239,7 @@ windows:
paths: paths:
- target/release/parity.exe - target/release/parity.exe
- target/release/parity.pdb - target/release/parity.pdb
name: "${CI_BUILD_NAME}_parity" name: "x86_64-pc-windows-msvc_parity"
test-linux: test-linux:
stage: test stage: test
before_script: before_script:
@ -241,4 +258,4 @@ deploy-binaries:
- tags - tags
- stable - stable
script: script:
- ./deploy.sh - scripts/deploy.sh

Cargo.lock generated

@ -3,7 +3,7 @@ name = "parity"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)", "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
@ -38,6 +38,7 @@ dependencies = [
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -84,16 +85,6 @@ name = "base64"
version = "0.2.1" version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bigint"
version = "0.1.0"
dependencies = [
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "bit-set" name = "bit-set"
version = "0.4.0" version = "0.4.0"
@ -147,15 +138,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "clippy" name = "clippy"
version = "0.0.85" version = "0.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "clippy_lints" name = "clippy_lints"
version = "0.0.85" version = "0.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -277,7 +268,7 @@ version = "1.4.0"
dependencies = [ dependencies = [
"bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.4.0", "ethash 1.4.0",
@ -296,7 +287,7 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0", "rlp 0.1.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@ -304,18 +295,28 @@ dependencies = [
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "ethcore-bigint"
version = "0.1.0"
dependencies = [
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "ethcore-dapps" name = "ethcore-dapps"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-devtools 1.4.0", "ethcore-devtools 1.4.0",
"ethcore-rpc 1.4.0", "ethcore-rpc 1.4.0",
"ethcore-util 1.4.0", "ethcore-util 1.4.0",
"https-fetch 0.1.0", "https-fetch 0.1.0",
"hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -450,7 +451,7 @@ dependencies = [
name = "ethcore-rpc" name = "ethcore-rpc"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.4.0", "ethash 1.4.0",
"ethcore 1.4.0", "ethcore 1.4.0",
"ethcore-devtools 1.4.0", "ethcore-devtools 1.4.0",
@ -462,7 +463,7 @@ dependencies = [
"ethstore 0.1.0", "ethstore 0.1.0",
"ethsync 1.4.0", "ethsync 1.4.0",
"json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)", "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0", "rlp 0.1.0",
@ -478,13 +479,13 @@ dependencies = [
name = "ethcore-signer" name = "ethcore-signer"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-devtools 1.4.0", "ethcore-devtools 1.4.0",
"ethcore-io 1.4.0", "ethcore-io 1.4.0",
"ethcore-rpc 1.4.0", "ethcore-rpc 1.4.0",
"ethcore-util 1.4.0", "ethcore-util 1.4.0",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-dapps-signer 1.4.0 (git+https://github.com/ethcore/parity-ui.git)", "parity-dapps-signer 1.4.0 (git+https://github.com/ethcore/parity-ui.git)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@ -503,7 +504,7 @@ dependencies = [
"ethcore-ipc-nano 1.4.0", "ethcore-ipc-nano 1.4.0",
"ethcore-util 1.4.0", "ethcore-util 1.4.0",
"json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)", "json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
@ -516,11 +517,11 @@ version = "1.4.0"
dependencies = [ dependencies = [
"ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"bigint 0.1.0", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
"ethcore-bigint 0.1.0",
"ethcore-devtools 1.4.0", "ethcore-devtools 1.4.0",
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
@ -547,8 +548,8 @@ dependencies = [
name = "ethcrypto" name = "ethcrypto"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bigint 0.1.0",
"eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
"ethcore-bigint 0.1.0",
"ethkey 0.2.0", "ethkey 0.2.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
@ -569,9 +570,9 @@ dependencies = [
name = "ethkey" name = "ethkey"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"bigint 0.1.0",
"docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
"eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
"ethcore-bigint 0.1.0",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@ -602,7 +603,7 @@ dependencies = [
name = "ethsync" name = "ethsync"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 1.4.0", "ethcore 1.4.0",
"ethcore-io 1.4.0", "ethcore-io 1.4.0",
@ -771,7 +772,7 @@ source = "git+https://github.com/ethcore/json-ipc-server.git#5fbd0253750d3097b9a
dependencies = [ dependencies = [
"bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -786,7 +787,7 @@ source = "git+https://github.com/ethcore/json-tcp-server#c2858522274ae56042472bb
dependencies = [ dependencies = [
"bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -795,7 +796,7 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-core" name = "jsonrpc-core"
version = "3.0.0" version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -808,10 +809,10 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-http-server" name = "jsonrpc-http-server"
version = "6.1.0" version = "6.1.0"
source = "git+https://github.com/ethcore/jsonrpc-http-server.git#339f7209b01d26aea01722b3a69127235287d6a9" source = "git+https://github.com/ethcore/jsonrpc-http-server.git#2766c6708f66f6f4e667211461d220b49c0d9fdf"
dependencies = [ dependencies = [
"hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
"jsonrpc-core 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -1306,7 +1307,7 @@ dependencies = [
[[package]] [[package]]
name = "rayon" name = "rayon"
version = "0.3.1" version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1344,8 +1345,8 @@ dependencies = [
name = "rlp" name = "rlp"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bigint 0.1.0",
"elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-bigint 0.1.0",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -1676,6 +1677,14 @@ dependencies = [
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "toml"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "traitobject" name = "traitobject"
version = "0.0.1" version = "0.0.1"
@ -1854,8 +1863,8 @@ dependencies = [
"checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27"
"checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "<none>" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "<none>"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
"checksum clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "97f6d6efa6d7aec74d4eca1be62164b605d43b7fcb5256e9db0449f685130cba" "checksum clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "d19bda68c3db98e3a780342f6101b44312fef20a5f13ce756d1202a35922b01b"
"checksum clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "dc96d3c877b63943b08ce3037c0ae8fd3bd5dead5fab11178b93afc71ca16031" "checksum clippy_lints 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)" = "3d4ed67c69b9bb35169be2538691d290a3aa0cbfd4b9f0bfb7c221fc1d399a96"
"checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245" "checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245"
"checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc" "checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc"
"checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "<none>" "checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "<none>"
@ -1941,7 +1950,7 @@ dependencies = [
"checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c" "checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"
"checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a" "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a"
"checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5" "checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5"
"checksum rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "941deb43a6254b9867fec1e0caeda38a2ad905ab18c57f7c68c396ca68998c07" "checksum rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "655df67c314c30fa3055a365eae276eb88aa4f3413a352a1ab32c1320eda41ea"
"checksum regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)" = "b4329b8928a284580a1c63ec9d846b12f6d3472317243ff7077aff11f23f2b29" "checksum regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)" = "b4329b8928a284580a1c63ec9d846b12f6d3472317243ff7077aff11f23f2b29"
"checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9" "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9"
"checksum ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d059a6a96d3be79042e3f70eb97945912839265f9d8ab45b921abaf266c70dbb" "checksum ring 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d059a6a96d3be79042e3f70eb97945912839265f9d8ab45b921abaf266c70dbb"
@ -1983,6 +1992,7 @@ dependencies = [
"checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af"
"checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270"
"checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6"
"checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72"
"checksum traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07eaeb7689bb7fca7ce15628319635758eda769fed481ecfe6686ddef2600616" "checksum traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "07eaeb7689bb7fca7ce15628319635758eda769fed481ecfe6686ddef2600616"
"checksum transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15f7cc7116182edca1ed08f6f8c4da92104555ca77addbabea4eaa59b20373d0" "checksum transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15f7cc7116182edca1ed08f6f8c4da92104555ca77addbabea4eaa59b20373d0"
"checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887"


@ -25,6 +25,7 @@ ansi_term = "0.7"
lazy_static = "0.2" lazy_static = "0.2"
regex = "0.1" regex = "0.1"
isatty = "0.1" isatty = "0.1"
toml = "0.2"
ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" }
fdlimit = { path = "util/fdlimit" } fdlimit = { path = "util/fdlimit" }
ethcore = { path = "ethcore" } ethcore = { path = "ethcore" }
@ -41,7 +42,7 @@ ethcore-logger = { path = "logger" }
rlp = { path = "util/rlp" } rlp = { path = "util/rlp" }
json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
ethcore-dapps = { path = "dapps", optional = true } ethcore-dapps = { path = "dapps", optional = true }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
ethcore-stratum = { path = "stratum" } ethcore-stratum = { path = "stratum" }
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]


@ -33,7 +33,7 @@ parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", versio
parity-dapps-home = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } parity-dapps-home = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" }
parity-dapps-wallet = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true } parity-dapps-wallet = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true }
mime_guess = { version = "1.6.1" } mime_guess = { version = "1.6.1" }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
[build-dependencies] [build-dependencies]
serde_codegen = { version = "0.8", optional = true } serde_codegen = { version = "0.8", optional = true }


@ -115,7 +115,7 @@ fn should_serve_rpc() {
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#));
} }
#[test] #[test]
@ -137,7 +137,7 @@ fn should_serve_rpc_at_slash_rpc() {
// then // then
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); assert_eq!(response.body, format!("58\n{}\n\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#));
} }
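
A note on the two changed fixtures above: the leading number in the expected body is the HTTP chunk size in hexadecimal. The JSON-RPC error object is 87 bytes (0x57); the updated jsonrpc-core apparently emits an extra trailing newline, so the chunk grows to 88 bytes (0x58) and is still followed by the terminating zero-length chunk:

    87 bytes of JSON + 1 byte of '\n' = 88 bytes = 0x58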


@ -11,7 +11,7 @@ build = "build.rs"
ethcore-ipc-codegen = { path = "../ipc/codegen" } ethcore-ipc-codegen = { path = "../ipc/codegen" }
[dependencies] [dependencies]
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }
ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc = { path = "../ipc/rpc" }
rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" } rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" }


@ -91,7 +91,7 @@ pub struct Light {
seed_compute: Mutex<SeedHashCompute>, seed_compute: Mutex<SeedHashCompute>,
} }
/// Light cache structur /// Light cache structure
impl Light { impl Light {
/// Create a new light cache for a given block number /// Create a new light cache for a given block number
pub fn new(block_number: u64) -> Light { pub fn new(block_number: u64) -> Light {
@ -134,16 +134,27 @@ impl Light {
}) })
} }
pub fn to_file(&self) -> io::Result<()> { pub fn to_file(&self) -> io::Result<PathBuf> {
let seed_compute = self.seed_compute.lock(); let seed_compute = self.seed_compute.lock();
let path = Light::file_path(seed_compute.get_seedhash(self.block_number)); let path = Light::file_path(seed_compute.get_seedhash(self.block_number));
if self.block_number >= ETHASH_EPOCH_LENGTH * 2 {
let deprecated = Light::file_path(
seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2));
if deprecated.exists() {
debug!(target: "ethash", "removing: {:?}", &deprecated);
try!(fs::remove_file(deprecated));
}
}
try!(fs::create_dir_all(path.parent().unwrap())); try!(fs::create_dir_all(path.parent().unwrap()));
let mut file = try!(File::create(path)); let mut file = try!(File::create(&path));
let cache_size = self.cache.len() * NODE_BYTES; let cache_size = self.cache.len() * NODE_BYTES;
let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) }; let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) };
try!(file.write(buf)); try!(file.write(buf));
Ok(()) Ok(path)
} }
} }
@ -455,3 +466,18 @@ fn test_seed_compute_after_newer() {
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162]; let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash); assert_eq!(seed_compute.get_seedhash(486382), hash);
} }
#[test]
fn test_drop_old_data() {
let first = Light::new(0).to_file().unwrap();
let second = Light::new(ETHASH_EPOCH_LENGTH).to_file().unwrap();
assert!(fs::metadata(&first).is_ok());
let _ = Light::new(ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = Light::new(ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err());
}
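
A minimal caller sketch (not part of the patch; `Light` and the `try!` style are as above) showing how the new `io::Result<PathBuf>` return value of `to_file` might be consumed:

    use std::io;
    use std::path::PathBuf;

    // Hypothetical helper: build the light cache for a block, persist it,
    // and use the path that `to_file` now reports. Caches older than two
    // epochs are pruned by `to_file` itself.
    fn persist_light_cache(block_number: u64) -> io::Result<PathBuf> {
        let light = Light::new(block_number);
        let path = try!(light.to_file());
        println!("ethash light cache written to {}", path.display());
        Ok(path)
    }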


@ -20,12 +20,12 @@ num_cpus = "0.2"
crossbeam = "0.2.9" crossbeam = "0.2.9"
lazy_static = "0.2" lazy_static = "0.2"
bloomchain = "0.1" bloomchain = "0.1"
rayon = "0.3.1" rayon = "0.4.2"
semver = "0.2" semver = "0.2"
bit-set = "0.4" bit-set = "0.4"
time = "0.1" time = "0.1"
evmjit = { path = "../evmjit", optional = true } evmjit = { path = "../evmjit", optional = true }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
ethash = { path = "../ethash" } ethash = { path = "../ethash" }
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore-io = { path = "../util/io" } ethcore-io = { path = "../util/io" }


@ -417,7 +417,7 @@ impl ClosedBlock {
} }
/// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
pub fn reopen<'a>(self, engine: &'a Engine) -> OpenBlock<'a> { pub fn reopen(self, engine: &Engine) -> OpenBlock {
// revert rewards (i.e. set state back at last transaction's state). // revert rewards (i.e. set state back at last transaction's state).
let mut block = self.block; let mut block = self.block;
block.state = self.unclosed_state; block.state = self.unclosed_state;


@ -23,6 +23,7 @@ use header::*;
use super::extras::*; use super::extras::*;
use transaction::*; use transaction::*;
use views::*; use views::*;
use log_entry::{LogEntry, LocalizedLogEntry};
use receipt::Receipt; use receipt::Receipt;
use blooms::{Bloom, BloomGroup}; use blooms::{Bloom, BloomGroup};
use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
@ -127,6 +128,10 @@ pub trait BlockProvider {
/// Returns numbers of blocks containing given bloom. /// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>; fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
/// Returns logs matching given filter.
fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
where F: Fn(&LogEntry) -> bool, Self: Sized;
} }
#[derive(Debug, Hash, Eq, PartialEq, Clone)] #[derive(Debug, Hash, Eq, PartialEq, Clone)]
@ -315,6 +320,51 @@ impl BlockProvider for BlockChain {
.map(|b| b as BlockNumber) .map(|b| b as BlockNumber)
.collect() .collect()
} }
fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
where F: Fn(&LogEntry) -> bool, Self: Sized {
// sort in reverse order
blocks.sort_by(|a, b| b.cmp(a));
let mut log_index = 0;
let mut logs = blocks.into_iter()
.filter_map(|number| self.block_hash(number).map(|hash| (number, hash)))
.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
.flat_map(|(number, hash, mut receipts, hashes)| {
assert_eq!(receipts.len(), hashes.len());
log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());
let receipts_len = receipts.len();
receipts.reverse();
receipts.into_iter()
.map(|receipt| receipt.logs)
.zip(hashes)
.enumerate()
.flat_map(move |(index, (mut logs, tx_hash))| {
let current_log_index = log_index;
log_index -= logs.len();
logs.reverse();
logs.into_iter()
.enumerate()
.map(move |(i, log)| LocalizedLogEntry {
entry: log,
block_hash: hash,
block_number: number,
transaction_hash: tx_hash,
// iterating in reverse order
transaction_index: receipts_len - index - 1,
log_index: current_log_index - i - 1,
})
})
})
.filter(|log_entry| matches(&log_entry.entry))
.take(limit.unwrap_or(::std::usize::MAX))
.collect::<Vec<LocalizedLogEntry>>();
logs.reverse();
logs
}
} }
pub struct AncestryIter<'a> { pub struct AncestryIter<'a> {
@ -1160,6 +1210,7 @@ mod tests {
use blockchain::extras::TransactionAddress; use blockchain::extras::TransactionAddress;
use views::BlockView; use views::BlockView;
use transaction::{Transaction, Action}; use transaction::{Transaction, Action};
use log_entry::{LogEntry, LocalizedLogEntry};
fn new_db(path: &str) -> Arc<Database> { fn new_db(path: &str) -> Arc<Database> {
Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap()) Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap())
@ -1235,7 +1286,7 @@ mod tests {
let bc = BlockChain::new(Config::default(), &genesis, db.clone()); let bc = BlockChain::new(Config::default(), &genesis, db.clone());
let mut block_hashes = vec![genesis_hash.clone()]; let mut block_hashes = vec![genesis_hash.clone()];
let mut batch =db.transaction(); let mut batch = db.transaction();
for _ in 0..10 { for _ in 0..10 {
let block = canon_chain.generate(&mut finalizer).unwrap(); let block = canon_chain.generate(&mut finalizer).unwrap();
block_hashes.push(BlockView::new(&block).header_view().sha3()); block_hashes.push(BlockView::new(&block).header_view().sha3());
@ -1566,7 +1617,7 @@ mod tests {
let mut block_header = bc.block_header(&best_hash); let mut block_header = bc.block_header(&best_hash);
while !block_header.is_none() { while !block_header.is_none() {
block_header = bc.block_header(&block_header.unwrap().parent_hash()); block_header = bc.block_header(block_header.unwrap().parent_hash());
} }
assert!(bc.cache_size().blocks > 1024 * 1024); assert!(bc.cache_size().blocks > 1024 * 1024);
@ -1612,13 +1663,134 @@ mod tests {
} }
fn insert_block(db: &Arc<Database>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute { fn insert_block(db: &Arc<Database>, bc: &BlockChain, bytes: &[u8], receipts: Vec<Receipt>) -> ImportRoute {
let mut batch =db.transaction(); let mut batch = db.transaction();
let res = bc.insert_block(&mut batch, bytes, receipts); let res = bc.insert_block(&mut batch, bytes, receipts);
db.write(batch).unwrap(); db.write(batch).unwrap();
bc.commit(); bc.commit();
res res
} }
#[test]
fn test_logs() {
// given
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
// just insert dummy transaction so that #transactions=#receipts
let t1 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 100.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&"".sha3());
let t2 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 100.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&"".sha3());
let t3 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 100.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&"".sha3());
let tx_hash1 = t1.hash();
let tx_hash2 = t2.hash();
let tx_hash3 = t3.hash();
let b1 = canon_chain.with_transaction(t1).with_transaction(t2).generate(&mut finalizer).unwrap();
let b2 = canon_chain.with_transaction(t3).generate(&mut finalizer).unwrap();
let temp = RandomTempPath::new();
let db = new_db(temp.as_str());
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
insert_block(&db, &bc, &b1, vec![Receipt {
state_root: H256::default(),
gas_used: 10_000.into(),
log_bloom: Default::default(),
logs: vec![
LogEntry { address: Default::default(), topics: vec![], data: vec![1], },
LogEntry { address: Default::default(), topics: vec![], data: vec![2], },
],
},
Receipt {
state_root: H256::default(),
gas_used: 10_000.into(),
log_bloom: Default::default(),
logs: vec![
LogEntry { address: Default::default(), topics: vec![], data: vec![3], },
],
}]);
insert_block(&db, &bc, &b2, vec![
Receipt {
state_root: H256::default(),
gas_used: 10_000.into(),
log_bloom: Default::default(),
logs: vec![
LogEntry { address: Default::default(), topics: vec![], data: vec![4], },
],
}
]);
// when
let block1 = BlockView::new(&b1);
let block2 = BlockView::new(&b2);
let logs1 = bc.logs(vec![1, 2], |_| true, None);
let logs2 = bc.logs(vec![1, 2], |_| true, Some(1));
// then
assert_eq!(logs1, vec![
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![1] },
block_hash: block1.hash(),
block_number: block1.header().number(),
transaction_hash: tx_hash1.clone(),
transaction_index: 0,
log_index: 0,
},
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![2] },
block_hash: block1.hash(),
block_number: block1.header().number(),
transaction_hash: tx_hash1.clone(),
transaction_index: 0,
log_index: 1,
},
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![3] },
block_hash: block1.hash(),
block_number: block1.header().number(),
transaction_hash: tx_hash2.clone(),
transaction_index: 1,
log_index: 2,
},
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] },
block_hash: block2.hash(),
block_number: block2.header().number(),
transaction_hash: tx_hash3.clone(),
transaction_index: 0,
log_index: 0,
}
]);
assert_eq!(logs2, vec![
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![4] },
block_hash: block2.hash(),
block_number: block2.header().number(),
transaction_hash: tx_hash3.clone(),
transaction_index: 0,
log_index: 0,
}
]);
}
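
For orientation, a hedged usage sketch of the new `BlockProvider::logs` outside the tests (the block numbers, predicate and limit are made up): it takes candidate block numbers, a `Fn(&LogEntry) -> bool` predicate and an optional cap; blocks are walked newest-first so the cap keeps the most recent matches, and the surviving entries are returned oldest-first.

    // Assumed: `bc` is a BlockChain as constructed in the tests above.
    let candidate_blocks: Vec<BlockNumber> = vec![1, 2];
    // Keep at most ten logs whose first data byte is 0x04.
    let recent = bc.logs(candidate_blocks, |entry: &LogEntry| entry.data.first() == Some(&4), Some(10));
    assert!(recent.len() <= 10);
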
#[test] #[test]
fn test_bloom_filter_simple() { fn test_bloom_filter_simple() {
// TODO: From here // TODO: From here


@ -957,10 +957,8 @@ impl BlockChainClient for Client {
} }
} }
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> { fn logs(&self, filter: Filter, limit: Option<usize>) -> Vec<LocalizedLogEntry> {
// TODO: lock blockchain only once let blocks = filter.bloom_possibilities().iter()
let mut blocks = filter.bloom_possibilities().iter()
.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
.flat_map(|m| m) .flat_map(|m| m)
// remove duplicate elements // remove duplicate elements
@ -968,35 +966,7 @@ impl BlockChainClient for Client {
.into_iter() .into_iter()
.collect::<Vec<u64>>(); .collect::<Vec<u64>>();
blocks.sort(); self.chain.read().logs(blocks, |entry| filter.matches(entry), limit)
let chain = self.chain.read();
blocks.into_iter()
.filter_map(|number| chain.block_hash(number).map(|hash| (number, hash)))
.filter_map(|(number, hash)| chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
.filter_map(|(number, hash, receipts)| chain.block_body(&hash).map(|ref b| (number, hash, receipts, BodyView::new(b).transaction_hashes())))
.flat_map(|(number, hash, receipts, hashes)| {
let mut log_index = 0;
receipts.into_iter()
.enumerate()
.flat_map(|(index, receipt)| {
log_index += receipt.logs.len();
receipt.logs.into_iter()
.enumerate()
.filter(|tuple| filter.matches(&tuple.1))
.map(|(i, log)| LocalizedLogEntry {
entry: log,
block_hash: hash.clone(),
block_number: number,
transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::default),
transaction_index: index,
log_index: log_index + i
})
.collect::<Vec<LocalizedLogEntry>>()
})
.collect::<Vec<LocalizedLogEntry>>()
})
.collect()
} }
fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> { fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {


@ -390,8 +390,8 @@ impl BlockChainClient for TestBlockChainClient {
unimplemented!(); unimplemented!();
} }
fn logs(&self, _filter: Filter) -> Vec<LocalizedLogEntry> { fn logs(&self, _filter: Filter, _limit: Option<usize>) -> Vec<LocalizedLogEntry> {
unimplemented!(); Vec::new()
} }
fn last_hashes(&self) -> LastHashes { fn last_hashes(&self) -> LastHashes {


@ -156,7 +156,7 @@ pub trait BlockChainClient : Sync + Send {
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>>; fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>>;
/// Returns logs matching given filter. /// Returns logs matching given filter.
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>; fn logs(&self, filter: Filter, limit: Option<usize>) -> Vec<LocalizedLogEntry>;
/// Makes a non-persistent transaction call. /// Makes a non-persistent transaction call.
fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result<Executed, CallError>; fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result<Executed, CallError>;
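
A correspondingly small sketch for the client-level API (hypothetical call site; `filter` is a `Filter` built elsewhere): the only change for callers is the extra `limit` argument.

    // At most the 50 most recent matches, versus the previous unlimited behaviour.
    let capped: Vec<LocalizedLogEntry> = client.logs(filter.clone(), Some(50));
    let all: Vec<LocalizedLogEntry> = client.logs(filter, None);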


@ -99,7 +99,9 @@ impl Engine for BasicAuthority {
/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
fn on_close_block(&self, _block: &mut ExecutedBlock) {} fn on_close_block(&self, _block: &mut ExecutedBlock) {}
fn seals_internally(&self) -> bool { true } fn is_sealer(&self, author: &Address) -> Option<bool> {
Some(self.our_params.authorities.contains(author))
}
/// Attempt to seal the block internally. /// Attempt to seal the block internally.
/// ///
@ -259,4 +261,14 @@ mod tests {
let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap(); let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
assert!(b.try_seal(engine, seal).is_ok()); assert!(b.try_seal(engine, seal).is_ok());
} }
#[test]
fn seals_internally() {
let tap = AccountProvider::transient_provider();
let authority = tap.insert_account("".sha3(), "").unwrap();
let engine = new_test_authority().engine;
assert!(!engine.is_sealer(&Address::default()).unwrap());
assert!(engine.is_sealer(&authority).unwrap());
}
} }


@ -58,7 +58,7 @@ impl Engine for InstantSeal {
Schedule::new_homestead() Schedule::new_homestead()
} }
fn seals_internally(&self) -> bool { true } fn is_sealer(&self, _author: &Address) -> Option<bool> { Some(true) }
fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> { fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
Some(Vec::new()) Some(Vec::new())


@ -71,8 +71,11 @@ pub trait Engine : Sync + Send {
/// Block transformation functions, after the transactions. /// Block transformation functions, after the transactions.
fn on_close_block(&self, _block: &mut ExecutedBlock) {} fn on_close_block(&self, _block: &mut ExecutedBlock) {}
/// If true, generate_seal has to be implemented. /// If Some(true) this author is able to generate seals, generate_seal has to be implemented.
fn seals_internally(&self) -> bool { false } /// None indicates that this Engine never seals internally regardless of author (e.g. PoW).
fn is_sealer(&self, _author: &Address) -> Option<bool> { None }
/// Checks if default address is able to seal.
fn is_default_sealer(&self) -> Option<bool> { self.is_sealer(&Default::default()) }
/// Attempt to seal the block internally. /// Attempt to seal the block internally.
/// ///
/// If `Some` is returned, then you get a valid seal. /// If `Some` is returned, then you get a valid seal.
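
A small illustration of the new contract (hypothetical helper, not in the patch): `None` means the engine never seals internally (e.g. Ethash/PoW), `Some(true)` / `Some(false)` answer for a concrete author, and `is_default_sealer` simply asks `is_sealer` about the default (zero) address.

    // Assumed helper showing how a caller such as the miner can interpret the result.
    fn author_can_seal(engine: &Engine, author: &Address) -> bool {
        match engine.is_sealer(author) {
            Some(able) => able, // authority-style engines answer per author
            None => false,      // pure PoW engines never seal internally
        }
    }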


@ -160,7 +160,7 @@ impl Engine for Ethash {
let fields = block.fields_mut(); let fields = block.fields_mut();
// Bestow block reward // Bestow block reward
fields.state.add_balance(&fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len()))); fields.state.add_balance(fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())));
// Bestow uncle rewards // Bestow uncle rewards
let current_number = fields.header.number(); let current_number = fields.header.number();


@ -175,6 +175,7 @@ pub struct Miner {
sealing_block_last_request: Mutex<u64>, sealing_block_last_request: Mutex<u64>,
// for sealing... // for sealing...
options: MinerOptions, options: MinerOptions,
seals_internally: bool,
gas_range_target: RwLock<(U256, U256)>, gas_range_target: RwLock<(U256, U256)>,
author: RwLock<Address>, author: RwLock<Address>,
@ -187,33 +188,24 @@ pub struct Miner {
} }
impl Miner { impl Miner {
/// Creates new instance of miner without accounts, but with given spec. /// Creates new instance of miner.
pub fn with_spec(spec: &Spec) -> Miner { fn new_raw(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Miner {
Miner { let work_poster = match options.new_work_notify.is_empty() {
transaction_queue: Arc::new(Mutex::new(TransactionQueue::new())), true => None,
options: Default::default(), false => Some(WorkPoster::new(&options.new_work_notify))
next_allowed_reseal: Mutex::new(Instant::now()), };
sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(20), enabled: false}),
gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()),
accounts: None,
engine: spec.engine.clone(),
work_poster: None,
gas_pricer: Mutex::new(GasPricer::new_fixed(20_000_000_000u64.into())),
}
}
/// Creates new instance of miner
pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
let work_poster = if !options.new_work_notify.is_empty() { Some(WorkPoster::new(&options.new_work_notify)) } else { None };
let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit))); let txq = Arc::new(Mutex::new(TransactionQueue::with_limits(options.tx_queue_size, options.tx_gas_limit)));
Arc::new(Miner { Miner {
transaction_queue: txq, transaction_queue: txq,
next_allowed_reseal: Mutex::new(Instant::now()), next_allowed_reseal: Mutex::new(Instant::now()),
sealing_block_last_request: Mutex::new(0), sealing_block_last_request: Mutex::new(0),
sealing_work: Mutex::new(SealingWork{queue: UsingQueue::new(options.work_queue_size), enabled: options.force_sealing || !options.new_work_notify.is_empty()}), sealing_work: Mutex::new(SealingWork{
queue: UsingQueue::new(options.work_queue_size),
enabled: options.force_sealing
|| !options.new_work_notify.is_empty()
|| spec.engine.is_default_sealer().unwrap_or(false)
}),
seals_internally: spec.engine.is_default_sealer().is_some(),
gas_range_target: RwLock::new((U256::zero(), U256::zero())), gas_range_target: RwLock::new((U256::zero(), U256::zero())),
author: RwLock::new(Address::default()), author: RwLock::new(Address::default()),
extra_data: RwLock::new(Vec::new()), extra_data: RwLock::new(Vec::new()),
@ -222,7 +214,17 @@ impl Miner {
engine: spec.engine.clone(), engine: spec.engine.clone(),
work_poster: work_poster, work_poster: work_poster,
gas_pricer: Mutex::new(gas_pricer), gas_pricer: Mutex::new(gas_pricer),
}) }
}
/// Creates new instance of miner without accounts, but with given spec.
pub fn with_spec(spec: &Spec) -> Miner {
Miner::new_raw(Default::default(), GasPricer::new_fixed(20_000_000_000u64.into()), spec, None)
}
/// Creates a new miner instance wrapped in an Arc.
pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Arc<Miner> {
Arc::new(Miner::new_raw(options, gas_pricer, spec, accounts))
} }
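Net effect of the refactor: both constructors funnel through new_raw, and the work queue starts enabled when sealing is forced, work notification endpoints are configured, or the engine can already seal for the default (zero) author. A condensed sketch of that decision, with plain values standing in for MinerOptions and the engine (illustrative only):

// Illustrative only: when does the sealing work queue start enabled?
fn sealing_enabled(force_sealing: bool, has_work_notify: bool, default_author_can_seal: Option<bool>) -> bool {
    force_sealing || has_work_notify || default_author_can_seal.unwrap_or(false)
}

fn main() {
    // PoW chain (engine returns None), no forcing, no notify endpoints: start disabled.
    assert!(!sealing_enabled(false, false, None));
    // Instant-seal test spec: the default author is a sealer, so mining starts enabled.
    assert!(sealing_enabled(false, false, Some(true)));
}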
fn forced_sealing(&self) -> bool { fn forced_sealing(&self) -> bool {
@ -244,6 +246,7 @@ impl Miner {
self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone()) self.sealing_work.lock().queue.peek_last_ref().map(|b| b.base().clone())
} }
#[cfg_attr(feature="dev", allow(match_same_arms))]
/// Prepares new block for sealing including top transactions from queue. /// Prepares new block for sealing including top transactions from queue.
fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option<H256>) { fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option<H256>) {
{ {
@ -360,7 +363,7 @@ impl Miner {
true true
} }
} else { } else {
// sealing is disabled. trace!(target: "miner", "requires_reseal: sealing is disabled");
false false
} }
} }
@ -578,6 +581,10 @@ impl MinerService for Miner {
} }
fn set_author(&self, author: Address) { fn set_author(&self, author: Address) {
if self.seals_internally {
let mut sealing_work = self.sealing_work.lock();
sealing_work.enabled = self.engine.is_sealer(&author).unwrap_or(false);
}
*self.author.write() = author; *self.author.write() = author;
} }
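With an internally sealing engine, changing the author can therefore switch sealing on or off, depending on whether the new author is an authorized sealer. A small sketch of the toggle, using the same Option<bool> convention as above (illustrative only):

// Illustrative only: sealing stays enabled only for authorized authors.
fn sealing_after_set_author(seals_internally: bool, author_is_sealer: Option<bool>, currently_enabled: bool) -> bool {
    if seals_internally { author_is_sealer.unwrap_or(false) } else { currently_enabled }
}

fn main() {
    // Engine seals internally, but the new author is not in the sealer set: sealing stops.
    assert!(!sealing_after_set_author(true, Some(false), true));
    // PoW-style miner: set_author never touches the enabled flag.
    assert!(sealing_after_set_author(false, None, true));
}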
@ -666,6 +673,7 @@ impl MinerService for Miner {
results results
} }
#[cfg_attr(feature="dev", allow(collapsible_if))]
fn import_own_transaction( fn import_own_transaction(
&self, &self,
chain: &MiningBlockChainClient, chain: &MiningBlockChainClient,
@ -703,7 +711,7 @@ impl MinerService for Miner {
if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() { if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() {
// Make sure to do it after transaction is imported and lock is dropped. // Make sure to do it after transaction is imported and lock is dropped.
// We need to create pending block and enable sealing. // We need to create pending block and enable sealing.
if self.engine.seals_internally() || !self.prepare_work_sealing(chain) { if self.seals_internally || !self.prepare_work_sealing(chain) {
// If new block has not been prepared (means we already had one) // If new block has not been prepared (means we already had one)
// or Engine might be able to seal internally, // or Engine might be able to seal internally,
// we need to update sealing. // we need to update sealing.
@ -821,7 +829,7 @@ impl MinerService for Miner {
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
trace!(target: "miner", "update_sealing: preparing a block"); trace!(target: "miner", "update_sealing: preparing a block");
let (block, original_work_hash) = self.prepare_block(chain); let (block, original_work_hash) = self.prepare_block(chain);
if self.engine.seals_internally() { if self.seals_internally {
trace!(target: "miner", "update_sealing: engine indicates internal sealing"); trace!(target: "miner", "update_sealing: engine indicates internal sealing");
self.seal_and_import_block_internally(chain, block); self.seal_and_import_block_internally(chain, block);
} else { } else {
@ -1027,7 +1035,7 @@ mod tests {
assert_eq!(miner.pending_transactions_hashes().len(), 1); assert_eq!(miner.pending_transactions_hashes().len(), 1);
assert_eq!(miner.pending_receipts().len(), 1); assert_eq!(miner.pending_receipts().len(), 1);
// This method will let us know if pending block was created (before calling that method) // This method will let us know if pending block was created (before calling that method)
assert_eq!(miner.prepare_work_sealing(&client), false); assert!(!miner.prepare_work_sealing(&client));
} }
#[test] #[test]
@ -1046,16 +1054,26 @@ mod tests {
assert_eq!(miner.pending_transactions().len(), 0); assert_eq!(miner.pending_transactions().len(), 0);
assert_eq!(miner.pending_receipts().len(), 0); assert_eq!(miner.pending_receipts().len(), 0);
// This method will let us know if pending block was created (before calling that method) // This method will let us know if pending block was created (before calling that method)
assert_eq!(miner.prepare_work_sealing(&client), true); assert!(miner.prepare_work_sealing(&client));
}
#[test]
fn should_not_seal_unless_enabled() {
let miner = miner();
let client = TestBlockChainClient::default();
// By default resealing is not required.
assert!(!miner.requires_reseal(1u8.into()));
miner.import_external_transactions(&client, vec![transaction()]).pop().unwrap().unwrap();
assert!(miner.prepare_work_sealing(&client));
// Unless asked to prepare work.
assert!(miner.requires_reseal(1u8.into()));
} }
#[test] #[test]
fn internal_seals_without_work() { fn internal_seals_without_work() {
let miner = Miner::with_spec(&Spec::new_test_instant()); let miner = Miner::with_spec(&Spec::new_test_instant());
{
let mut sealing_work = miner.sealing_work.lock();
sealing_work.enabled = true;
}
let c = generate_dummy_client(2); let c = generate_dummy_client(2);
let client = c.reference().as_ref(); let client = c.reference().as_ref();

View File

@ -949,7 +949,7 @@ mod test {
new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment) new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment)
} }
/// Returns two transactions with identical (sender, nonce) but different gas_price/hash. /// Returns two transactions with identical (sender, nonce) but different gas price/hash.
fn new_similar_tx_pair() -> (SignedTransaction, SignedTransaction) { fn new_similar_tx_pair() -> (SignedTransaction, SignedTransaction) {
new_tx_pair_default(0.into(), 1.into()) new_tx_pair_default(0.into(), 1.into())
} }

View File

@ -45,6 +45,7 @@ impl StateProducer {
} }
} }
#[cfg_attr(feature="dev", allow(let_and_return))]
/// Tick the state producer. This alters the state, writing new data into /// Tick the state producer. This alters the state, writing new data into
/// the database. /// the database.
pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut HashDB) { pub fn tick<R: Rng>(&mut self, rng: &mut R, db: &mut HashDB) {

View File

@ -183,7 +183,7 @@ impl Spec {
let r = Rlp::new(&seal); let r = Rlp::new(&seal);
(0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect() (0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect()
}); });
return header; header
} }
/// Compose the genesis block for this chain. /// Compose the genesis block for this chain.
@ -278,7 +278,7 @@ mod tests {
// https://github.com/ethcore/parity/issues/1840 // https://github.com/ethcore/parity/issues/1840
#[test] #[test]
fn test_load_empty() { fn test_load_empty() {
assert!(Spec::load(&vec![] as &[u8]).is_err()); assert!(Spec::load(&[] as &[u8]).is_err());
} }
#[test] #[test]

View File

@ -1314,13 +1314,13 @@ fn storage_at_from_database() {
let temp = RandomTempPath::new(); let temp = RandomTempPath::new();
let (root, db) = { let (root, db) = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state_in(temp.as_path());
state.set_storage(&a, H256::from(&U256::from(01u64)), H256::from(&U256::from(69u64))); state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64)));
state.commit().unwrap(); state.commit().unwrap();
state.drop() state.drop()
}; };
let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); let s = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
assert_eq!(s.storage_at(&a, &H256::from(&U256::from(01u64))), H256::from(&U256::from(69u64))); assert_eq!(s.storage_at(&a, &H256::from(&U256::from(1u64))), H256::from(&U256::from(69u64)));
} }
#[test] #[test]

View File

@ -19,6 +19,7 @@ use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, Blo
use ethereum; use ethereum;
use block::IsBlock; use block::IsBlock;
use tests::helpers::*; use tests::helpers::*;
use types::filter::Filter;
use common::*; use common::*;
use devtools::*; use devtools::*;
use miner::Miner; use miner::Miner;
@ -131,6 +132,34 @@ fn returns_chain_info() {
assert_eq!(info.best_block_hash, block.header().hash()); assert_eq!(info.best_block_hash, block.header().hash());
} }
#[test]
fn returns_logs() {
let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let logs = client.logs(Filter {
from_block: BlockID::Earliest,
to_block: BlockID::Latest,
address: None,
topics: vec![],
}, None);
assert_eq!(logs.len(), 0);
}
#[test]
fn returns_logs_with_limit() {
let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let logs = client.logs(Filter {
from_block: BlockID::Earliest,
to_block: BlockID::Latest,
address: None,
topics: vec![],
}, Some(2));
assert_eq!(logs.len(), 0);
}
#[test] #[test]
fn returns_block_body() { fn returns_block_body() {
let dummy_block = get_good_dummy_block(); let dummy_block = get_good_dummy_block();

View File

@ -241,6 +241,7 @@ mod tests {
use spec::*; use spec::*;
use transaction::*; use transaction::*;
use tests::helpers::*; use tests::helpers::*;
use types::log_entry::{LogEntry, LocalizedLogEntry};
use rlp::View; use rlp::View;
fn check_ok(result: Result<(), Error>) { fn check_ok(result: Result<(), Error>) {
@ -333,6 +334,12 @@ mod tests {
fn block_receipts(&self, _hash: &H256) -> Option<BlockReceipts> { fn block_receipts(&self, _hash: &H256) -> Option<BlockReceipts> {
unimplemented!() unimplemented!()
} }
fn logs<F>(&self, _blocks: Vec<BlockNumber>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
where F: Fn(&LogEntry) -> bool, Self: Sized {
unimplemented!()
}
} }
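The new logs hook on the test provider mirrors the BlockProvider addition: given a list of block numbers, a per-entry predicate and an optional cap, return the matching localized entries. A hedged sketch of how a provider might satisfy it with plain iterators, using String entries and a hypothetical logs_for_block lookup in place of the real types (illustrative only):

// Illustrative only: filter log entries across blocks and honour an optional limit.
fn collect_logs<F, G>(blocks: Vec<u64>, matches: F, limit: Option<usize>, logs_for_block: G) -> Vec<String>
    where F: Fn(&String) -> bool, G: Fn(u64) -> Vec<String>
{
    let filtered = blocks.into_iter()
        .flat_map(|n| logs_for_block(n))
        .filter(|entry| matches(entry));
    match limit {
        Some(n) => filtered.take(n).collect(),
        None => filtered.collect(),
    }
}

fn main() {
    let logs_for_block = |n: u64| vec![format!("log-from-block-{}", n)];
    let logs = collect_logs(vec![1, 2, 3], |_| true, Some(2), logs_for_block);
    assert_eq!(logs.len(), 2);
}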
fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> { fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> {

View File

@ -8,5 +8,5 @@ rust-crypto = "0.2.36"
tiny-keccak = "1.0" tiny-keccak = "1.0"
eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" }
ethkey = { path = "../ethkey" } ethkey = { path = "../ethkey" }
bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }

View File

@ -16,7 +16,7 @@
//! Crypto utils used ethstore and network. //! Crypto utils used ethstore and network.
extern crate bigint; extern crate ethcore_bigint as bigint;
extern crate tiny_keccak; extern crate tiny_keccak;
extern crate crypto as rcrypto; extern crate crypto as rcrypto;
extern crate secp256k1; extern crate secp256k1;

View File

@ -10,7 +10,7 @@ tiny-keccak = "1.0"
eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" }
rustc-serialize = "0.3" rustc-serialize = "0.3"
docopt = { version = "0.6", optional = true } docopt = { version = "0.6", optional = true }
bigint = { path = "../util/bigint" } ethcore-bigint = { path = "../util/bigint" }
[features] [features]
default = [] default = []

View File

@ -20,7 +20,7 @@ extern crate lazy_static;
extern crate tiny_keccak; extern crate tiny_keccak;
extern crate secp256k1; extern crate secp256k1;
extern crate rustc_serialize; extern crate rustc_serialize;
extern crate bigint; extern crate ethcore_bigint as bigint;
mod brain; mod brain;
mod error; mod error;

View File

@ -10,7 +10,7 @@ rustc-serialize = "0.3"
serde = "0.8" serde = "0.8"
serde_json = "0.8" serde_json = "0.8"
serde_macros = { version = "0.8", optional = true } serde_macros = { version = "0.8", optional = true }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
[build-dependencies] [build-dependencies]
serde_codegen = { version = "0.8", optional = true } serde_codegen = { version = "0.8", optional = true }

View File

@ -17,20 +17,20 @@
use std::str::{FromStr, from_utf8}; use std::str::{FromStr, from_utf8};
use std::{io, fs}; use std::{io, fs};
use std::io::{BufReader, BufRead}; use std::io::{BufReader, BufRead};
use std::time::Duration; use std::time::{Instant, Duration};
use std::thread::sleep; use std::thread::sleep;
use std::sync::Arc; use std::sync::Arc;
use rustc_serialize::hex::FromHex; use rustc_serialize::hex::FromHex;
use ethcore_logger::{setup_log, Config as LogConfig}; use ethcore_logger::{setup_log, Config as LogConfig};
use io::{PanicHandler, ForwardPanic}; use io::{PanicHandler, ForwardPanic};
use util::ToPretty; use util::{ToPretty, Uint};
use rlp::PayloadInfo; use rlp::PayloadInfo;
use ethcore::service::ClientService; use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID}; use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
use ethcore::error::ImportError; use ethcore::error::ImportError;
use ethcore::miner::Miner; use ethcore::miner::Miner;
use cache::CacheConfig; use cache::CacheConfig;
use informant::Informant; use informant::{Informant, MillisecondDuration};
use io_handler::ImportIoHandler; use io_handler::ImportIoHandler;
use params::{SpecType, Pruning}; use params::{SpecType, Pruning};
use helpers::{to_client_config, execute_upgrades}; use helpers::{to_client_config, execute_upgrades};
@ -108,6 +108,8 @@ pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
} }
fn execute_import(cmd: ImportBlockchain) -> Result<String, String> { fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let timer = Instant::now();
// Setup panic handler // Setup panic handler
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
@ -218,15 +220,25 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
} }
} }
client.flush_queue(); client.flush_queue();
let report = client.report();
Ok("Import completed.".into()) let ms = timer.elapsed().as_milliseconds();
Ok(format!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s",
ms / 1000,
report.blocks_imported,
(report.blocks_imported * 1000) as u64 / ms,
report.transactions_applied,
(report.transactions_applied * 1000) as u64 / ms,
report.gas_processed / From::from(1_000_000),
(report.gas_processed / From::from(ms * 1000)).low_u64(),
).into())
} }
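The import command now reports throughput instead of the bare "Import completed.": elapsed wall-clock time is converted to milliseconds and divided into the block, transaction and gas counters from client.report(). A minimal sketch of that conversion and the blk/s figure, assuming a helper along the lines of the MillisecondDuration import above (illustrative only):

use std::time::{Duration, Instant};

// Illustrative only: mirror the millisecond-based throughput arithmetic of the new report.
fn as_milliseconds(d: &Duration) -> u64 {
    d.as_secs() * 1000 + (d.subsec_nanos() / 1_000_000) as u64
}

fn main() {
    let timer = Instant::now();
    // ... block import would happen here ...
    let ms = std::cmp::max(1, as_milliseconds(&timer.elapsed())); // guard against a zero-length run
    let blocks_imported = 1_000u64;
    println!("{} blk/s", blocks_imported * 1000 / ms);
}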
fn execute_export(cmd: ExportBlockchain) -> Result<String, String> { fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
// Setup panic handler // Setup panic handler
let panic_handler = PanicHandler::new_in_arc(); let panic_handler = PanicHandler::new_in_arc();
let format = cmd.format.unwrap_or_else(Default::default); let format = cmd.format.unwrap_or_default();
// load spec file // load spec file
let spec = try!(cmd.spec.spec()); let spec = try!(cmd.spec.spec());

View File

@ -54,12 +54,10 @@ pub fn payload<B: ipc::BinaryConvertable>() -> Result<B, BootError> {
let mut buffer = Vec::new(); let mut buffer = Vec::new();
try!( try!(
io::stdin().read_to_end(&mut buffer) io::stdin().read_to_end(&mut buffer).map_err(BootError::ReadArgs)
.map_err(|io_err| BootError::ReadArgs(io_err))
); );
ipc::binary::deserialize::<B>(&buffer) ipc::binary::deserialize::<B>(&buffer).map_err(BootError::DecodeArgs)
.map_err(|binary_error| BootError::DecodeArgs(binary_error))
} }
pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket<HypervisorServiceClient<NanoSocket>>{ pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket<HypervisorServiceClient<NanoSocket>>{
@ -73,7 +71,7 @@ pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> Guar
pub fn dependency<C: WithSocket<NanoSocket>>(url: &str) pub fn dependency<C: WithSocket<NanoSocket>>(url: &str)
-> Result<GuardedSocket<C>, BootError> -> Result<GuardedSocket<C>, BootError>
{ {
nanoipc::generic_client::<C>(url).map_err(|socket_err| BootError::DependencyConnect(socket_err)) nanoipc::generic_client::<C>(url).map_err(BootError::DependencyConnect)
} }
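The boot helper cleanup relies on a one-field enum variant being usable as a function, so map_err(|e| BootError::ReadArgs(e)) and map_err(BootError::ReadArgs) are equivalent. A self-contained sketch of the pattern (the pared-down BootError here is illustrative):

use std::io::{self, Read};

#[derive(Debug)]
enum BootError {
    ReadArgs(io::Error),
}

// Illustrative only: the one-field variant BootError::ReadArgs acts as a fn(io::Error) -> BootError,
// so it can be handed to map_err directly instead of being wrapped in a closure.
fn read_stdin() -> Result<Vec<u8>, BootError> {
    let mut buffer = Vec::new();
    io::stdin().read_to_end(&mut buffer).map_err(BootError::ReadArgs)?;
    Ok(buffer)
}

fn main() {
    match read_stdin() {
        Ok(bytes) => println!("read {} bytes", bytes.len()),
        Err(e) => println!("boot error: {:?}", e),
    }
}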
pub fn main_thread() -> Arc<AtomicBool> { pub fn main_thread() -> Arc<AtomicBool> {

View File

@ -0,0 +1,98 @@
[parity]
mode = "active"
mode_timeout = 300
mode_alarm = 3600
chain = "homestead"
db_path = "$HOME/.parity"
keys_path = "$HOME/.parity/keys"
identity = ""
[account]
unlock = ["0xdeadbeefcafe0000000000000000000000000000"]
password = ["~/.safe/password.file"]
keys_iterations = 10240
[signer]
force = false
disable = false
port = 8180
interface = "127.0.0.1"
path = "$HOME/.parity/signer"
[network]
disable = false
port = 30303
min_peers = 25
max_peers = 50
nat = "any"
id = "0x1"
bootnodes = []
discovery = true
reserved_only = false
reserved_peers = "./path_to_file"
[rpc]
disable = false
port = 8545
interface = "local"
cors = "null"
apis = ["web3", "eth", "net", "personal", "ethcore", "traces", "rpc"]
hosts = ["none"]
[ipc]
disable = false
path = "$HOME/.parity/jsonrpc.ipc"
apis = ["web3", "eth", "net", "personal", "ethcore", "traces", "rpc"]
[dapps]
disable = false
port = 8080
interface = "local"
hosts = ["none"]
path = "$HOME/.parity/dapps"
# authorization:
user = "test_user"
pass = "test_pass"
[mining]
author = "0xdeadbeefcafe0000000000000000000000000001"
force_sealing = true
reseal_on_txs = "all"
reseal_min_period = 4000
work_queue_size = 20
relay_set = "cheap"
usd_per_tx = "0"
usd_per_eth = "auto"
price_update_period = "hourly"
gas_floor_target = "4700000"
gas_cap = "6283184"
tx_queue_size = 1024
tx_gas_limit = "6283184"
extra_data = "Parity"
remove_solved = false
notify_work = ["http://localhost:3001"]
[footprint]
tracing = "auto"
pruning = "auto"
cache_size_db = 64
cache_size_blocks = 8
cache_size_queue = 50
cache_size = 128 # Overrides above caches with total size
fast_and_loose = false
db_compaction = "ssd"
fat_db = false
[snapshots]
disable_periodic = false
[vm]
jit = false
[misc]
logging = "own_tx=trace"
log_file = "/var/log/parity.log"
color = true

View File

@ -0,0 +1,2 @@
[account
unlock = "0x1"

View File

@ -0,0 +1,4 @@
[account]
unlock = "0x1"
passwd = []

parity/cli/config.toml (new file, 63 lines)
View File

@ -0,0 +1,63 @@
[parity]
mode = "dark"
mode_timeout = 15
mode_alarm = 10
chain = "./chain.json"
[account]
unlock = ["0x1", "0x2", "0x3"]
password = ["passwdfile path"]
[signer]
disable = true
[network]
disable = false
discovery = true
nat = "any"
min_peers = 10
max_peers = 20
reserved_only = true
reserved_peers = "./path/to/reserved_peers"
[rpc]
disable = true
port = 8180
[ipc]
apis = ["rpc", "eth"]
[dapps]
port = 8080
user = "username"
pass = "password"
[mining]
author = "0xdeadbeefcafe0000000000000000000000000001"
force_sealing = true
reseal_on_txs = "all"
reseal_min_period = 4000
price_update_period = "hourly"
tx_queue_size = 2048
[footprint]
tracing = "on"
pruning = "fast"
cache_size_db = 128
cache_size_blocks = 16
cache_size_queue = 100
db_compaction = "ssd"
fat_db = true
[snapshots]
disable_periodic = true
[vm]
jit = false
[misc]
logging = "own_tx=trace"
log_file = "/var/log/parity.log"
color = true

parity/cli/mod.rs (new file, 701 lines)
View File

@ -0,0 +1,701 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[macro_use]
mod usage;
usage! {
{
// Commands
cmd_daemon: bool,
cmd_wallet: bool,
cmd_account: bool,
cmd_new: bool,
cmd_list: bool,
cmd_export: bool,
cmd_import: bool,
cmd_signer: bool,
cmd_new_token: bool,
cmd_snapshot: bool,
cmd_restore: bool,
cmd_ui: bool,
// Arguments
arg_pid_file: String,
arg_file: Option<String>,
arg_path: Vec<String>,
// Flags
// -- Legacy Options
flag_geth: bool,
flag_testnet: bool,
flag_import_geth_keys: bool,
flag_datadir: Option<String>,
flag_networkid: Option<String>,
flag_peers: Option<u16>,
flag_nodekey: Option<String>,
flag_nodiscover: bool,
flag_jsonrpc: bool,
flag_jsonrpc_off: bool,
flag_webapp: bool,
flag_dapps_off: bool,
flag_rpc: bool,
flag_rpcaddr: Option<String>,
flag_rpcport: Option<u16>,
flag_rpcapi: Option<String>,
flag_rpccorsdomain: Option<String>,
flag_ipcdisable: bool,
flag_ipc_off: bool,
flag_ipcapi: Option<String>,
flag_ipcpath: Option<String>,
flag_gasprice: Option<String>,
flag_etherbase: Option<String>,
flag_extradata: Option<String>,
flag_cache: Option<u32>,
// -- Miscellaneous Options
flag_version: bool,
flag_no_config: bool,
}
{
// -- Operating Options
flag_mode: String = "active", or |c: &Config| otry!(c.parity).mode.clone(),
flag_mode_timeout: u64 = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(),
flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(),
flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(),
flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(),
flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(),
// -- Account Options
flag_unlock: Option<String> = None,
or |c: &Config| otry!(c.account).unlock.clone().map(|vec| Some(vec.join(","))),
flag_password: Vec<String> = Vec::new(),
or |c: &Config| otry!(c.account).password.clone(),
flag_keys_iterations: u32 = 10240u32,
or |c: &Config| otry!(c.account).keys_iterations.clone(),
flag_force_signer: bool = false,
or |c: &Config| otry!(c.signer).force.clone(),
flag_no_signer: bool = false,
or |c: &Config| otry!(c.signer).disable.clone(),
flag_signer_port: u16 = 8180u16,
or |c: &Config| otry!(c.signer).port.clone(),
flag_signer_interface: String = "local",
or |c: &Config| otry!(c.signer).interface.clone(),
flag_signer_path: String = "$HOME/.parity/signer",
or |c: &Config| otry!(c.signer).path.clone(),
// NOTE [todr] For security reasons don't put this in config files
flag_signer_no_validation: bool = false, or |_| None,
// -- Networking Options
flag_no_network: bool = false,
or |c: &Config| otry!(c.network).disable.clone(),
flag_port: u16 = 30303u16,
or |c: &Config| otry!(c.network).port.clone(),
flag_min_peers: u16 = 25u16,
or |c: &Config| otry!(c.network).min_peers.clone(),
flag_max_peers: u16 = 50u16,
or |c: &Config| otry!(c.network).max_peers.clone(),
flag_nat: String = "any",
or |c: &Config| otry!(c.network).nat.clone(),
flag_network_id: Option<String> = None,
or |c: &Config| otry!(c.network).id.clone().map(Some),
flag_bootnodes: Option<String> = None,
or |c: &Config| otry!(c.network).bootnodes.clone().map(|vec| Some(vec.join(","))),
flag_no_discovery: bool = false,
or |c: &Config| otry!(c.network).discovery.map(|d| !d).clone(),
flag_node_key: Option<String> = None,
or |c: &Config| otry!(c.network).node_key.clone().map(Some),
flag_reserved_peers: Option<String> = None,
or |c: &Config| otry!(c.network).reserved_peers.clone().map(Some),
flag_reserved_only: bool = false,
or |c: &Config| otry!(c.network).reserved_only.clone(),
// -- API and Console Options
// RPC
flag_no_jsonrpc: bool = false,
or |c: &Config| otry!(c.rpc).disable.clone(),
flag_jsonrpc_port: u16 = 8545u16,
or |c: &Config| otry!(c.rpc).port.clone(),
flag_jsonrpc_interface: String = "local",
or |c: &Config| otry!(c.rpc).interface.clone(),
flag_jsonrpc_cors: Option<String> = None,
or |c: &Config| otry!(c.rpc).cors.clone().map(Some),
flag_jsonrpc_apis: String = "web3,eth,net,ethcore,personal,traces,rpc",
or |c: &Config| otry!(c.rpc).apis.clone().map(|vec| vec.join(",")),
flag_jsonrpc_hosts: String = "none",
or |c: &Config| otry!(c.rpc).hosts.clone().map(|vec| vec.join(",")),
// IPC
flag_no_ipc: bool = false,
or |c: &Config| otry!(c.ipc).disable.clone(),
flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc",
or |c: &Config| otry!(c.ipc).path.clone(),
flag_ipc_apis: String = "web3,eth,net,ethcore,personal,traces,rpc",
or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")),
// DAPPS
flag_no_dapps: bool = false,
or |c: &Config| otry!(c.dapps).disable.clone(),
flag_dapps_port: u16 = 8080u16,
or |c: &Config| otry!(c.dapps).port.clone(),
flag_dapps_interface: String = "local",
or |c: &Config| otry!(c.dapps).interface.clone(),
flag_dapps_hosts: String = "none",
or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")),
flag_dapps_path: String = "$HOME/.parity/dapps",
or |c: &Config| otry!(c.dapps).path.clone(),
flag_dapps_user: Option<String> = None,
or |c: &Config| otry!(c.dapps).user.clone().map(Some),
flag_dapps_pass: Option<String> = None,
or |c: &Config| otry!(c.dapps).pass.clone().map(Some),
// -- Sealing/Mining Options
flag_author: Option<String> = None,
or |c: &Config| otry!(c.mining).author.clone().map(Some),
flag_force_sealing: bool = false,
or |c: &Config| otry!(c.mining).force_sealing.clone(),
flag_reseal_on_txs: String = "own",
or |c: &Config| otry!(c.mining).reseal_on_txs.clone(),
flag_reseal_min_period: u64 = 2000u64,
or |c: &Config| otry!(c.mining).reseal_min_period.clone(),
flag_work_queue_size: usize = 20usize,
or |c: &Config| otry!(c.mining).work_queue_size.clone(),
flag_tx_gas_limit: Option<String> = None,
or |c: &Config| otry!(c.mining).tx_gas_limit.clone().map(Some),
flag_relay_set: String = "cheap",
or |c: &Config| otry!(c.mining).relay_set.clone(),
flag_usd_per_tx: String = "0",
or |c: &Config| otry!(c.mining).usd_per_tx.clone(),
flag_usd_per_eth: String = "auto",
or |c: &Config| otry!(c.mining).usd_per_eth.clone(),
flag_price_update_period: String = "hourly",
or |c: &Config| otry!(c.mining).price_update_period.clone(),
flag_gas_floor_target: String = "4700000",
or |c: &Config| otry!(c.mining).gas_floor_target.clone(),
flag_gas_cap: String = "6283184",
or |c: &Config| otry!(c.mining).gas_cap.clone(),
flag_extra_data: Option<String> = None,
or |c: &Config| otry!(c.mining).extra_data.clone().map(Some),
flag_tx_queue_size: usize = 1024usize,
or |c: &Config| otry!(c.mining).tx_queue_size.clone(),
flag_remove_solved: bool = false,
or |c: &Config| otry!(c.mining).remove_solved.clone(),
flag_notify_work: Option<String> = None,
or |c: &Config| otry!(c.mining).notify_work.clone().map(|vec| Some(vec.join(","))),
// -- Footprint Options
flag_tracing: String = "auto",
or |c: &Config| otry!(c.footprint).tracing.clone(),
flag_pruning: String = "auto",
or |c: &Config| otry!(c.footprint).pruning.clone(),
flag_cache_size_db: u32 = 64u32,
or |c: &Config| otry!(c.footprint).cache_size_db.clone(),
flag_cache_size_blocks: u32 = 8u32,
or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(),
flag_cache_size_queue: u32 = 50u32,
or |c: &Config| otry!(c.footprint).cache_size_queue.clone(),
flag_cache_size: Option<u32> = None,
or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some),
flag_fast_and_loose: bool = false,
or |c: &Config| otry!(c.footprint).fast_and_loose.clone(),
flag_db_compaction: String = "ssd",
or |c: &Config| otry!(c.footprint).db_compaction.clone(),
flag_fat_db: bool = false,
or |c: &Config| otry!(c.footprint).fat_db.clone(),
// -- Import/Export Options
flag_from: String = "1", or |_| None,
flag_to: String = "latest", or |_| None,
flag_format: Option<String> = None, or |_| None,
// -- Snapshot Options
flag_at: String = "latest", or |_| None,
flag_no_periodic_snapshot: bool = false,
or |c: &Config| otry!(c.snapshots).disable_periodic.clone(),
// -- Virtual Machine Options
flag_jitvm: bool = false,
or |c: &Config| otry!(c.vm).jit.clone(),
// -- Miscellaneous Options
flag_config: String = "$HOME/.parity/config.toml", or |_| None,
flag_logging: Option<String> = None,
or |c: &Config| otry!(c.misc).logging.clone().map(Some),
flag_log_file: Option<String> = None,
or |c: &Config| otry!(c.misc).log_file.clone().map(Some),
flag_no_color: bool = false,
or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(),
}
}
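Each flag in the second block pairs a hard default with an `or` closure that reads the corresponding field out of the decoded TOML Config; the generated into_args then resolves a value as CLI flag first, config file second, built-in default last. A minimal sketch of that precedence, detached from the macro (illustrative only):

// Illustrative only: precedence applied by the generated RawArgs::into_args.
fn resolve<T>(cli: Option<T>, config: Option<T>, default: T) -> T {
    cli.or(config).unwrap_or(default)
}

fn main() {
    // A --chain flag on the command line wins over chain = "morden" from the config file.
    assert_eq!(resolve(Some("xyz"), Some("morden"), "homestead"), "xyz");
    // No CLI flag: the config file value applies.
    assert_eq!(resolve(None, Some("morden"), "homestead"), "morden");
    // Neither present: fall back to the built-in default.
    assert_eq!(resolve(None, None, "homestead"), "homestead");
}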
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Config {
parity: Option<Operating>,
account: Option<Account>,
signer: Option<Signer>,
network: Option<Network>,
rpc: Option<Rpc>,
ipc: Option<Ipc>,
dapps: Option<Dapps>,
mining: Option<Mining>,
footprint: Option<Footprint>,
snapshots: Option<Snapshots>,
vm: Option<VM>,
misc: Option<Misc>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Operating {
mode: Option<String>,
mode_timeout: Option<u64>,
mode_alarm: Option<u64>,
chain: Option<String>,
db_path: Option<String>,
keys_path: Option<String>,
identity: Option<String>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Account {
unlock: Option<Vec<String>>,
password: Option<Vec<String>>,
keys_iterations: Option<u32>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Signer {
force: Option<bool>,
disable: Option<bool>,
port: Option<u16>,
interface: Option<String>,
path: Option<String>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Network {
disable: Option<bool>,
port: Option<u16>,
min_peers: Option<u16>,
max_peers: Option<u16>,
nat: Option<String>,
id: Option<String>,
bootnodes: Option<Vec<String>>,
discovery: Option<bool>,
node_key: Option<String>,
reserved_peers: Option<String>,
reserved_only: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Rpc {
disable: Option<bool>,
port: Option<u16>,
interface: Option<String>,
cors: Option<String>,
apis: Option<Vec<String>>,
hosts: Option<Vec<String>>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Ipc {
disable: Option<bool>,
path: Option<String>,
apis: Option<Vec<String>>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Dapps {
disable: Option<bool>,
port: Option<u16>,
interface: Option<String>,
hosts: Option<Vec<String>>,
path: Option<String>,
user: Option<String>,
pass: Option<String>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Mining {
author: Option<String>,
force_sealing: Option<bool>,
reseal_on_txs: Option<String>,
reseal_min_period: Option<u64>,
work_queue_size: Option<usize>,
tx_gas_limit: Option<String>,
relay_set: Option<String>,
usd_per_tx: Option<String>,
usd_per_eth: Option<String>,
price_update_period: Option<String>,
gas_floor_target: Option<String>,
gas_cap: Option<String>,
extra_data: Option<String>,
tx_queue_size: Option<usize>,
remove_solved: Option<bool>,
notify_work: Option<Vec<String>>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Footprint {
tracing: Option<String>,
pruning: Option<String>,
fast_and_loose: Option<bool>,
cache_size: Option<u32>,
cache_size_db: Option<u32>,
cache_size_blocks: Option<u32>,
cache_size_queue: Option<u32>,
db_compaction: Option<String>,
fat_db: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Snapshots {
disable_periodic: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct VM {
jit: Option<bool>,
}
#[derive(Default, Debug, PartialEq, RustcDecodable)]
struct Misc {
logging: Option<String>,
log_file: Option<String>,
color: Option<bool>,
}
#[cfg(test)]
mod tests {
use super::{
Args, ArgsError,
Config, Operating, Account, Signer, Network, Rpc, Ipc, Dapps, Mining, Footprint, Snapshots, VM, Misc
};
use toml;
#[test]
fn should_parse_args_and_include_config() {
// given
let mut config = Config::default();
let mut operating = Operating::default();
operating.chain = Some("morden".into());
config.parity = Some(operating);
// when
let args = Args::parse_with_config(&["parity"], config).unwrap();
// then
assert_eq!(args.flag_chain, "morden".to_owned());
}
#[test]
fn should_not_use_config_if_cli_is_provided() {
// given
let mut config = Config::default();
let mut operating = Operating::default();
operating.chain = Some("morden".into());
config.parity = Some(operating);
// when
let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap();
// then
assert_eq!(args.flag_chain, "xyz".to_owned());
}
#[test]
fn should_parse_full_config() {
// given
let config = toml::decode_str(include_str!("./config.full.toml")).unwrap();
// when
let args = Args::parse_with_config(&["parity", "--chain", "xyz"], config).unwrap();
// then
assert_eq!(args, Args {
// Commands
cmd_daemon: false,
cmd_wallet: false,
cmd_account: false,
cmd_new: false,
cmd_list: false,
cmd_export: false,
cmd_import: false,
cmd_signer: false,
cmd_new_token: false,
cmd_snapshot: false,
cmd_restore: false,
cmd_ui: false,
// Arguments
arg_pid_file: "".into(),
arg_file: None,
arg_path: vec![],
// -- Operating Options
flag_mode: "active".into(),
flag_mode_timeout: 300u64,
flag_mode_alarm: 3600u64,
flag_chain: "xyz".into(),
flag_db_path: "$HOME/.parity".into(),
flag_keys_path: "$HOME/.parity/keys".into(),
flag_identity: "".into(),
// -- Account Options
flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()),
flag_password: vec!["~/.safe/password.file".into()],
flag_keys_iterations: 10240u32,
flag_force_signer: false,
flag_no_signer: false,
flag_signer_port: 8180u16,
flag_signer_interface: "127.0.0.1".into(),
flag_signer_path: "$HOME/.parity/signer".into(),
flag_signer_no_validation: false,
// -- Networking Options
flag_no_network: false,
flag_port: 30303u16,
flag_min_peers: 25u16,
flag_max_peers: 50u16,
flag_nat: "any".into(),
flag_network_id: Some("0x1".into()),
flag_bootnodes: Some("".into()),
flag_no_discovery: false,
flag_node_key: None,
flag_reserved_peers: Some("./path_to_file".into()),
flag_reserved_only: false,
// -- API and Console Options
// RPC
flag_no_jsonrpc: false,
flag_jsonrpc_port: 8545u16,
flag_jsonrpc_interface: "local".into(),
flag_jsonrpc_cors: Some("null".into()),
flag_jsonrpc_apis: "web3,eth,net,personal,ethcore,traces,rpc".into(),
flag_jsonrpc_hosts: "none".into(),
// IPC
flag_no_ipc: false,
flag_ipc_path: "$HOME/.parity/jsonrpc.ipc".into(),
flag_ipc_apis: "web3,eth,net,personal,ethcore,traces,rpc".into(),
// DAPPS
flag_no_dapps: false,
flag_dapps_port: 8080u16,
flag_dapps_interface: "local".into(),
flag_dapps_hosts: "none".into(),
flag_dapps_path: "$HOME/.parity/dapps".into(),
flag_dapps_user: Some("test_user".into()),
flag_dapps_pass: Some("test_pass".into()),
// -- Sealing/Mining Options
flag_author: Some("0xdeadbeefcafe0000000000000000000000000001".into()),
flag_force_sealing: true,
flag_reseal_on_txs: "all".into(),
flag_reseal_min_period: 4000u64,
flag_work_queue_size: 20usize,
flag_tx_gas_limit: Some("6283184".into()),
flag_relay_set: "cheap".into(),
flag_usd_per_tx: "0".into(),
flag_usd_per_eth: "auto".into(),
flag_price_update_period: "hourly".into(),
flag_gas_floor_target: "4700000".into(),
flag_gas_cap: "6283184".into(),
flag_extra_data: Some("Parity".into()),
flag_tx_queue_size: 1024usize,
flag_remove_solved: false,
flag_notify_work: Some("http://localhost:3001".into()),
// -- Footprint Options
flag_tracing: "auto".into(),
flag_pruning: "auto".into(),
flag_cache_size_db: 64u32,
flag_cache_size_blocks: 8u32,
flag_cache_size_queue: 50u32,
flag_cache_size: Some(128),
flag_fast_and_loose: false,
flag_db_compaction: "ssd".into(),
flag_fat_db: false,
// -- Import/Export Options
flag_from: "1".into(),
flag_to: "latest".into(),
flag_format: None,
// -- Snapshot Options
flag_at: "latest".into(),
flag_no_periodic_snapshot: false,
// -- Virtual Machine Options
flag_jitvm: false,
// -- Legacy Options
flag_geth: false,
flag_testnet: false,
flag_import_geth_keys: false,
flag_datadir: None,
flag_networkid: None,
flag_peers: None,
flag_nodekey: None,
flag_nodiscover: false,
flag_jsonrpc: false,
flag_jsonrpc_off: false,
flag_webapp: false,
flag_dapps_off: false,
flag_rpc: false,
flag_rpcaddr: None,
flag_rpcport: None,
flag_rpcapi: None,
flag_rpccorsdomain: None,
flag_ipcdisable: false,
flag_ipc_off: false,
flag_ipcapi: None,
flag_ipcpath: None,
flag_gasprice: None,
flag_etherbase: None,
flag_extradata: None,
flag_cache: None,
// -- Miscellaneous Options
flag_version: false,
flag_config: "$HOME/.parity/config.toml".into(),
flag_logging: Some("own_tx=trace".into()),
flag_log_file: Some("/var/log/parity.log".into()),
flag_no_color: false,
flag_no_config: false,
});
}
#[test]
fn should_parse_config_and_return_errors() {
let config1 = Args::parse_config(include_str!("./config.invalid1.toml"));
let config2 = Args::parse_config(include_str!("./config.invalid2.toml"));
match (config1, config2) {
(Err(ArgsError::Parsing(_)), Err(ArgsError::Decode(_))) => {},
(a, b) => {
assert!(false, "Got invalid error types: {:?}, {:?}", a, b);
}
}
}
#[test]
fn should_deserialize_toml_file() {
let config: Config = toml::decode_str(include_str!("./config.toml")).unwrap();
assert_eq!(config, Config {
parity: Some(Operating {
mode: Some("dark".into()),
mode_timeout: Some(15u64),
mode_alarm: Some(10u64),
chain: Some("./chain.json".into()),
db_path: None,
keys_path: None,
identity: None,
}),
account: Some(Account {
unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]),
password: Some(vec!["passwdfile path".into()]),
keys_iterations: None,
}),
signer: Some(Signer {
force: None,
disable: Some(true),
port: None,
interface: None,
path: None,
}),
network: Some(Network {
disable: Some(false),
port: None,
min_peers: Some(10),
max_peers: Some(20),
nat: Some("any".into()),
id: None,
bootnodes: None,
discovery: Some(true),
node_key: None,
reserved_peers: Some("./path/to/reserved_peers".into()),
reserved_only: Some(true),
}),
rpc: Some(Rpc {
disable: Some(true),
port: Some(8180),
interface: None,
cors: None,
apis: None,
hosts: None,
}),
ipc: Some(Ipc {
disable: None,
path: None,
apis: Some(vec!["rpc".into(), "eth".into()]),
}),
dapps: Some(Dapps {
disable: None,
port: Some(8080),
path: None,
interface: None,
hosts: None,
user: Some("username".into()),
pass: Some("password".into())
}),
mining: Some(Mining {
author: Some("0xdeadbeefcafe0000000000000000000000000001".into()),
force_sealing: Some(true),
reseal_on_txs: Some("all".into()),
reseal_min_period: Some(4000),
work_queue_size: None,
relay_set: None,
usd_per_tx: None,
usd_per_eth: None,
price_update_period: Some("hourly".into()),
gas_floor_target: None,
gas_cap: None,
tx_queue_size: Some(2048),
tx_gas_limit: None,
extra_data: None,
remove_solved: None,
notify_work: None,
}),
footprint: Some(Footprint {
tracing: Some("on".into()),
pruning: Some("fast".into()),
fast_and_loose: None,
cache_size: None,
cache_size_db: Some(128),
cache_size_blocks: Some(16),
cache_size_queue: Some(100),
db_compaction: Some("ssd".into()),
fat_db: Some(true),
}),
snapshots: Some(Snapshots {
disable_periodic: Some(true),
}),
vm: Some(VM {
jit: Some(false),
}),
misc: Some(Misc {
logging: Some("own_tx=trace".into()),
log_file: Some("/var/log/parity.log".into()),
color: Some(true),
})
});
}
}

parity/cli/usage.rs (new file, 213 lines)
View File

@ -0,0 +1,213 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
macro_rules! otry {
($e: expr) => (
match $e {
Some(ref v) => v,
None => {
return None;
}
}
)
}
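otry! is a local counterpart of try! for Option inside those `or` closures: it unwraps the optional config section, or makes the whole closure return None so the built-in default applies. A self-contained sketch of the same short-circuit, with a pared-down Config (illustrative only):

// Illustrative only: otry! is try! for Option -- unwrap the section or return None.
macro_rules! otry {
    ($e: expr) => (
        match $e {
            Some(ref v) => v,
            None => return None,
        }
    )
}

struct Mining { tx_queue_size: Option<usize> }
struct Config { mining: Option<Mining> }

fn tx_queue_size_from(c: &Config) -> Option<usize> {
    // If [mining] is absent, the lookup short-circuits to None and the built-in default applies.
    otry!(c.mining).tx_queue_size
}

fn main() {
    assert_eq!(tx_queue_size_from(&Config { mining: None }), None);
    let cfg = Config { mining: Some(Mining { tx_queue_size: Some(2048) }) };
    assert_eq!(tx_queue_size_from(&cfg), Some(2048));
}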
macro_rules! usage {
(
{
$(
$field_a:ident : $typ_a:ty,
)*
}
{
$(
$field:ident : $typ:ty = $default:expr, or $from_config:expr,
)*
}
) => {
use toml;
use std::{fs, io, process};
use std::io::Read;
use util::version;
use docopt::{Docopt, Error as DocoptError};
use helpers::replace_home;
use rustc_serialize;
#[derive(Debug)]
pub enum ArgsError {
Docopt(DocoptError),
Parsing(Vec<toml::ParserError>),
Decode(toml::DecodeError),
Config(String, io::Error),
}
impl ArgsError {
pub fn exit(self) -> ! {
match self {
ArgsError::Docopt(e) => e.exit(),
ArgsError::Parsing(errors) => {
println!("There is an error in config file.");
for e in &errors {
println!("{}", e);
}
process::exit(2)
},
ArgsError::Decode(e) => {
println!("You might have supplied invalid parameters in config file.");
println!("{}", e);
process::exit(2)
},
ArgsError::Config(path, e) => {
println!("There was an error reading your config file at: {}", path);
println!("{}", e);
process::exit(2)
}
}
}
}
impl From<DocoptError> for ArgsError {
fn from(e: DocoptError) -> Self { ArgsError::Docopt(e) }
}
impl From<toml::DecodeError> for ArgsError {
fn from(e: toml::DecodeError) -> Self { ArgsError::Decode(e) }
}
#[derive(Debug, PartialEq)]
pub struct Args {
$(
pub $field_a: $typ_a,
)*
$(
pub $field: $typ,
)*
}
impl Default for Args {
fn default() -> Self {
Args {
$(
$field_a: Default::default(),
)*
$(
$field: $default.into(),
)*
}
}
}
#[derive(Default, Debug, PartialEq, Clone, RustcDecodable)]
struct RawArgs {
$(
$field_a: $typ_a,
)*
$(
$field: Option<$typ>,
)*
}
impl Args {
pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
let raw_args = try!(RawArgs::parse(command));
// Skip loading config file if no_config flag is specified
if raw_args.flag_no_config {
return Ok(raw_args.into_args(Config::default()));
}
let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config);
let config_file = replace_home(&config_file);
let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) {
// Load config file
(Ok(mut file), _) => {
println!("Loading config file from {}", &config_file);
let mut config = String::new();
try!(file.read_to_string(&mut config).map_err(|e| ArgsError::Config(config_file, e)));
try!(Self::parse_config(&config))
},
// Don't display error in case default config cannot be loaded.
(Err(_), false) => Config::default(),
// Config set from CLI (fail with error)
(Err(e), true) => {
return Err(ArgsError::Config(config_file, e));
},
};
Ok(raw_args.into_args(config))
}
#[cfg(test)]
pub fn parse_without_config<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
Self::parse_with_config(command, Config::default())
}
#[cfg(test)]
fn parse_with_config<S: AsRef<str>>(command: &[S], config: Config) -> Result<Self, ArgsError> {
Ok(try!(RawArgs::parse(command)).into_args(config))
}
fn parse_config(config: &str) -> Result<Config, ArgsError> {
let mut value_parser = toml::Parser::new(&config);
match value_parser.parse() {
Some(value) => {
let result = rustc_serialize::Decodable::decode(&mut toml::Decoder::new(toml::Value::Table(value)));
match result {
Ok(config) => Ok(config),
Err(e) => Err(e.into()),
}
},
None => Err(ArgsError::Parsing(value_parser.errors)),
}
}
pub fn print_version() -> String {
format!(include_str!("./version.txt"), version())
}
}
impl RawArgs {
fn into_args(self, config: Config) -> Args {
let mut args = Args::default();
$(
args.$field_a = self.$field_a;
)*
$(
args.$field = self.$field.or_else(|| $from_config(&config)).unwrap_or_else(|| $default.into());
)*
args
}
pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, DocoptError> {
Docopt::new(Self::usage()).and_then(|d| d.argv(command).decode())
}
fn usage() -> String {
format!(
include_str!("./usage.txt"),
$(
$field={ let v: $typ = $default.into(); v },
// Uncomment this to debug
// "named argument never used" error
// $field = $default,
)*
)
}
}
};
}
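Taken together, the macro emits the Args/RawArgs structs plus the parsing entry points consumed by the rest of parity/cli. A hedged usage sketch follows; it is not self-contained, since Args is generated by the usage! invocation above, and the flag spelling is assumed from the usage text:

// Illustrative only: how the generated API is consumed elsewhere in parity/cli.
fn example() -> Args {
    // Parse CLI arguments; on a docopt or config error, print it and exit.
    Args::parse(&["parity", "--chain", "morden"]).unwrap_or_else(|e| e.exit())
}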

View File

@ -1,23 +1,3 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::version;
use docopt::Docopt;
pub const USAGE: &'static str = r#"
Parity. Ethereum Client. Parity. Ethereum Client.
By Wood/Paronyan/Kotewicz/Drwięga/Volf et al. By Wood/Paronyan/Kotewicz/Drwięga/Volf et al.
Copyright 2015, 2016 Ethcore (UK) Limited Copyright 2015, 2016 Ethcore (UK) Limited
@ -41,134 +21,140 @@ Operating Options:
passive - Parity syncs initially, then sleeps and passive - Parity syncs initially, then sleeps and
wakes regularly to resync. wakes regularly to resync.
dark - Parity syncs only when an external interface dark - Parity syncs only when an external interface
is active. [default: active]. is active. (default: {flag_mode}).
--mode-timeout SECS Specify the number of seconds before inactivity --mode-timeout SECS Specify the number of seconds before inactivity
timeout occurs when mode is dark or passive timeout occurs when mode is dark or passive
[default: 300]. (default: {flag_mode_timeout}).
--mode-alarm SECS Specify the number of seconds before auto sleep --mode-alarm SECS Specify the number of seconds before auto sleep
reawake timeout occurs when mode is passive reawake timeout occurs when mode is passive
[default: 3600]. (default: {flag_mode_alarm}).
--chain CHAIN Specify the blockchain type. CHAIN may be either a --chain CHAIN Specify the blockchain type. CHAIN may be either a
JSON chain specification file or olympic, frontier, JSON chain specification file or olympic, frontier,
homestead, mainnet, morden, classic or testnet homestead, mainnet, morden, classic or testnet
[default: homestead]. (default: {flag_chain}).
-d --db-path PATH Specify the database & configuration directory path -d --db-path PATH Specify the database & configuration directory path
[default: $HOME/.parity]. (default: {flag_db_path}).
--keys-path PATH Specify the path for JSON key files to be found --keys-path PATH Specify the path for JSON key files to be found
[default: $HOME/.parity/keys]. (default: {flag_keys_path}).
--identity NAME Specify your node's name. --identity NAME Specify your node's name. (default: {flag_identity})
Account Options: Account Options:
--unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution.
ACCOUNTS is a comma-delimited list of addresses. ACCOUNTS is a comma-delimited list of addresses.
Implies --no-signer. Implies --no-signer. (default: {flag_unlock:?})
--password FILE Provide a file containing a password for unlocking --password FILE Provide a file containing a password for unlocking
an account. an account. (default: {flag_password:?})
--keys-iterations NUM Specify the number of iterations to use when --keys-iterations NUM Specify the number of iterations to use when
deriving key from the password (bigger is more deriving key from the password (bigger is more
secure) [default: 10240]. secure) (default: {flag_keys_iterations}).
--force-signer Enable Trusted Signer WebSocket endpoint used by --force-signer Enable Trusted Signer WebSocket endpoint used by
Signer UIs, even when --unlock is in use. Signer UIs, even when --unlock is in use.
(default: {flag_force_signer})
--no-signer Disable Trusted Signer WebSocket endpoint used by --no-signer Disable Trusted Signer WebSocket endpoint used by
Signer UIs. Signer UIs. (default: {flag_no_signer})
--signer-port PORT Specify the port of Trusted Signer server --signer-port PORT Specify the port of Trusted Signer server
[default: 8180]. (default: {flag_signer_port}).
--signer-interface IP Specify the hostname portion of the Trusted Signer --signer-interface IP Specify the hostname portion of the Trusted Signer
server, IP should be an interface's IP address, server, IP should be an interface's IP address,
or local [default: local]. or local (default: {flag_signer_interface}).
--signer-path PATH Specify directory where Signer UIs tokens should --signer-path PATH Specify directory where Signer UIs tokens should
be stored. [default: $HOME/.parity/signer] be stored. (default: {flag_signer_path})
--signer-no-validation Disable Origin and Host headers validation for --signer-no-validation Disable Origin and Host headers validation for
Trusted Signer. WARNING: INSECURE. Used only for Trusted Signer. WARNING: INSECURE. Used only for
development. development. (default: {flag_signer_no_validation})
Networking Options: Networking Options:
--no-network Disable p2p networking. --no-network Disable p2p networking. (default: {flag_no_network})
--port PORT Override the port on which the node should listen --port PORT Override the port on which the node should listen
[default: 30303]. (default: {flag_port}).
--min-peers NUM Try to maintain at least NUM peers [default: 25]. --min-peers NUM Try to maintain at least NUM peers (default: {flag_min_peers}).
--max-peers NUM Allow up to that many peers [default: 50]. --max-peers NUM Allow up to that many peers (default: {flag_max_peers}).
--nat METHOD Specify method to use for determining public --nat METHOD Specify method to use for determining public
address. Must be one of: any, none, upnp, address. Must be one of: any, none, upnp,
extip:<IP> [default: any]. extip:<IP> (default: {flag_nat}).
--network-id INDEX Override the network identifier from the chain we --network-id INDEX Override the network identifier from the chain we
are on. are on. (default: {flag_network_id:?})
--bootnodes NODES Override the bootnodes from our chain. NODES should --bootnodes NODES Override the bootnodes from our chain. NODES should
be comma-delimited enodes. be comma-delimited enodes. (default: {flag_bootnodes:?})
--no-discovery Disable new peer discovery. --no-discovery Disable new peer discovery. (default: {flag_no_discovery})
--node-key KEY Specify node secret key, either as 64-character hex --node-key KEY Specify node secret key, either as 64-character hex
string or input to SHA3 operation. string or input to SHA3 operation. (default: {flag_node_key:?})
--reserved-peers FILE Provide a file containing enodes, one per line. --reserved-peers FILE Provide a file containing enodes, one per line.
These nodes will always have a reserved slot on top These nodes will always have a reserved slot on top
of the normal maximum peers. of the normal maximum peers. (default: {flag_reserved_peers:?})
--reserved-only Connect only to reserved nodes. --reserved-only Connect only to reserved nodes. (default: {flag_reserved_only})
API and Console Options: API and Console Options:
--no-jsonrpc Disable the JSON-RPC API server. --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc})
--jsonrpc-port PORT Specify the port portion of the JSONRPC API server --jsonrpc-port PORT Specify the port portion of the JSONRPC API server
[default: 8545]. (default: {flag_jsonrpc_port}).
--jsonrpc-interface IP Specify the hostname portion of the JSONRPC API --jsonrpc-interface IP Specify the hostname portion of the JSONRPC API
server, IP should be an interface's IP address, or server, IP should be an interface's IP address, or
all (all interfaces) or local [default: local]. all (all interfaces) or local (default: {flag_jsonrpc_interface}).
--jsonrpc-cors URL Specify CORS header for JSON-RPC API responses. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses.
(default: {flag_jsonrpc_cors:?})
--jsonrpc-apis APIS Specify the APIs available through the JSONRPC --jsonrpc-apis APIS Specify the APIs available through the JSONRPC
interface. APIS is a comma-delimited list of API interface. APIS is a comma-delimited list of API
name. Possible name are web3, eth, net, personal, name. Possible name are web3, eth, net, personal,
ethcore, ethcore_set, traces, rpc. ethcore, ethcore_set, traces, rpc.
[default: web3,eth,net,ethcore,personal,traces,rpc]. (default: {flag_jsonrpc_apis}).
--jsonrpc-hosts HOSTS List of allowed Host header values. This option will --jsonrpc-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it validate the Host header sent by the browser, it
is additional security against some attack is additional security against some attack
vectors. Special options: "all", "none", vectors. Special options: "all", "none",
[default: none]. (default: {flag_jsonrpc_hosts}).
--no-ipc Disable JSON-RPC over IPC service. --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc})
--ipc-path PATH Specify custom path for JSON-RPC over IPC service --ipc-path PATH Specify custom path for JSON-RPC over IPC service
[default: $HOME/.parity/jsonrpc.ipc]. (default: {flag_ipc_path}).
--ipc-apis APIS Specify custom API set available via JSON-RPC over --ipc-apis APIS Specify custom API set available via JSON-RPC over
IPC [default: web3,eth,net,ethcore,personal,traces,rpc]. IPC (default: {flag_ipc_apis}).
--no-dapps Disable the Dapps server (e.g. status page). --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps})
--dapps-port PORT Specify the port portion of the Dapps server --dapps-port PORT Specify the port portion of the Dapps server
[default: 8080]. (default: {flag_dapps_port}).
--dapps-interface IP Specify the hostname portion of the Dapps --dapps-interface IP Specify the hostname portion of the Dapps
server, IP should be an interface's IP address, server, IP should be an interface's IP address,
or local [default: local]. or local (default: {flag_dapps_interface}).
--dapps-hosts HOSTS List of allowed Host header values. This option will --dapps-hosts HOSTS List of allowed Host header values. This option will
validate the Host header sent by the browser, it validate the Host header sent by the browser, it
is additional security against some attack is additional security against some attack
vectors. Special options: "all", "none", vectors. Special options: "all", "none",
[default: none]. (default: {flag_dapps_hosts}).
--dapps-user USERNAME Specify username for Dapps server. It will be --dapps-user USERNAME Specify username for Dapps server. It will be
used in HTTP Basic Authentication Scheme. used in HTTP Basic Authentication Scheme.
If --dapps-pass is not specified you will be If --dapps-pass is not specified you will be
asked for password on startup. asked for password on startup. (default: {flag_dapps_user:?})
--dapps-pass PASSWORD Specify password for Dapps server. Use only in --dapps-pass PASSWORD Specify password for Dapps server. Use only in
conjunction with --dapps-user. conjunction with --dapps-user. (default: {flag_dapps_pass:?})
--dapps-path PATH Specify directory where dapps should be installed. --dapps-path PATH Specify directory where dapps should be installed.
[default: $HOME/.parity/dapps] (default: {flag_dapps_path})
Sealing/Mining Options: Sealing/Mining Options:
--author ADDRESS Specify the block author (aka "coinbase") address --author ADDRESS Specify the block author (aka "coinbase") address
for sending block rewards from sealed blocks. for sending block rewards from sealed blocks.
NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION. NOTE: MINING WILL NOT WORK WITHOUT THIS OPTION.
(default: {flag_author:?})
--force-sealing Force the node to author new blocks as if it were --force-sealing Force the node to author new blocks as if it were
always sealing/mining. always sealing/mining.
(default: {flag_force_sealing})
--reseal-on-txs SET Specify which transactions should force the node --reseal-on-txs SET Specify which transactions should force the node
to reseal a block. SET is one of: to reseal a block. SET is one of:
none - never reseal on new transactions; none - never reseal on new transactions;
own - reseal only on a new local transaction; own - reseal only on a new local transaction;
ext - reseal only on a new external transaction; ext - reseal only on a new external transaction;
all - reseal on all new transactions [default: own]. all - reseal on all new transactions
(default: {flag_reseal_on_txs}).
--reseal-min-period MS Specify the minimum time between reseals from --reseal-min-period MS Specify the minimum time between reseals from
incoming transactions. MS is time measured in incoming transactions. MS is time measured in
milliseconds [default: 2000]. milliseconds (default: {flag_reseal_min_period}).
--work-queue-size ITEMS Specify the number of historical work packages --work-queue-size ITEMS Specify the number of historical work packages
which are kept cached in case a solution is found for which are kept cached in case a solution is found for
them later. High values take more memory but result them later. High values take more memory but result
in fewer unusable solutions [default: 20]. in fewer unusable solutions (default: {flag_work_queue_size}).
--tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas --tx-gas-limit GAS Apply a limit of GAS as the maximum amount of gas
a single transaction may have for it to be mined. a single transaction may have for it to be mined.
(default: {flag_tx_gas_limit:?})
--relay-set SET Set of transactions to relay. SET may be: --relay-set SET Set of transactions to relay. SET may be:
cheap - Relay any transaction in the queue (this cheap - Relay any transaction in the queue (this
may include invalid transactions); may include invalid transactions);
@ -176,78 +162,81 @@ Sealing/Mining Options:
guarantees we don't relay invalid transactions, but guarantees we don't relay invalid transactions, but
means we relay nothing if not mining); means we relay nothing if not mining);
lenient - Same as strict when mining, and cheap lenient - Same as strict when mining, and cheap
when not [default: cheap]. when not (default: {flag_relay_set}).
--usd-per-tx USD Amount of USD to be paid for a basic transaction --usd-per-tx USD Amount of USD to be paid for a basic transaction
[default: 0]. The minimum gas price is set (default: {flag_usd_per_tx}). The minimum gas price is set
accordingly. accordingly.
--usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an --usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an
amount in USD, a web service or 'auto' to use each amount in USD, a web service or 'auto' to use each
web service in turn and fallback on the last known web service in turn and fallback on the last known
good value [default: auto]. good value (default: {flag_usd_per_eth}).
--price-update-period T T will be allowed to pass between each gas price --price-update-period T T will be allowed to pass between each gas price
update. T may be daily, hourly, a number of seconds, update. T may be daily, hourly, a number of seconds,
or a time string of the form "2 days", "30 minutes" or a time string of the form "2 days", "30 minutes"
etc. [default: hourly]. etc. (default: {flag_price_update_period}).
--gas-floor-target GAS Amount of gas per block to target when sealing a new --gas-floor-target GAS Amount of gas per block to target when sealing a new
block [default: 4700000]. block (default: {flag_gas_floor_target}).
--gas-cap GAS A cap on how large we will raise the gas limit per --gas-cap GAS A cap on how large we will raise the gas limit per
block due to transaction volume [default: 6283184]. block due to transaction volume (default: {flag_gas_cap}).
--extra-data STRING Specify a custom extra-data for authored blocks, no --extra-data STRING Specify a custom extra-data for authored blocks, no
more than 32 characters. more than 32 characters. (default: {flag_extra_data:?})
--tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting --tx-queue-size LIMIT Maximum amount of transactions in the queue (waiting
to be included in next block) [default: 1024]. to be included in next block) (default: {flag_tx_queue_size}).
--remove-solved Move solved blocks from the work package queue --remove-solved Move solved blocks from the work package queue
instead of cloning them. This gives a slightly instead of cloning them. This gives a slightly
faster import speed, but means that extra solutions faster import speed, but means that extra solutions
submitted for the same work package will go unused. submitted for the same work package will go unused.
(default: {flag_remove_solved})
--notify-work URLS URLs to which work package notifications are pushed. --notify-work URLS URLs to which work package notifications are pushed.
URLS should be a comma-delimited list of HTTP URLs. URLS should be a comma-delimited list of HTTP URLs.
(default: {flag_notify_work:?})
Footprint Options: Footprint Options:
--tracing BOOL Indicates if full transaction tracing should be --tracing BOOL Indicates if full transaction tracing should be
enabled. Works only if the client has been fully synced enabled. Works only if the client has been fully synced
with tracing enabled. BOOL may be one of auto, on, with tracing enabled. BOOL may be one of auto, on,
off. auto uses last used value of this option (off off. auto uses last used value of this option (off
if it does not exist) [default: auto]. if it does not exist) (default: {flag_tracing}).
--pruning METHOD Configure pruning of the state/storage trie. METHOD --pruning METHOD Configure pruning of the state/storage trie. METHOD
may be one of auto, archive, fast: may be one of auto, archive, fast:
archive - keep all state trie data. No pruning. archive - keep all state trie data. No pruning.
fast - maintain journal overlay. Fast but 50MB used. fast - maintain journal overlay. Fast but 50MB used.
auto - use the method most recently synced or auto - use the method most recently synced or
default to fast if none synced [default: auto]. default to fast if none synced (default: {flag_pruning}).
--cache-size-db MB Override database cache size [default: 64]. --cache-size-db MB Override database cache size (default: {flag_cache_size_db}).
--cache-size-blocks MB Specify the preferred size of the blockchain cache in --cache-size-blocks MB Specify the preferred size of the blockchain cache in
megabytes [default: 8]. megabytes (default: {flag_cache_size_blocks}).
--cache-size-queue MB Specify the maximum size of memory to use for block --cache-size-queue MB Specify the maximum size of memory to use for block
queue [default: 50]. queue (default: {flag_cache_size_queue}).
--cache-size MB Set total amount of discretionary memory to use for --cache-size MB Set total amount of discretionary memory to use for
the entire system, overrides other cache and queue the entire system, overrides other cache and queue
options. options. (default: {flag_cache_size:?})
--fast-and-loose Disables DB WAL, which gives a significant speed up --fast-and-loose Disables DB WAL, which gives a significant speed up
but means an unclean exit is unrecoverable. but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
--db-compaction TYPE Database compaction type. TYPE may be one of: --db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs; ssd - suitable for SSDs and fast HDDs;
hdd - suitable for slow HDDs [default: ssd]. hdd - suitable for slow HDDs (default: {flag_db_compaction}).
--fat-db Fat database. --fat-db Fat database. (default: {flag_fat_db})
Import/Export Options: Import/Export Options:
--from BLOCK Export from block BLOCK, which may be an index or --from BLOCK Export from block BLOCK, which may be an index or
hash [default: 1]. hash (default: {flag_from}).
--to BLOCK Export to (including) block BLOCK, which may be an --to BLOCK Export to (including) block BLOCK, which may be an
index, hash or 'latest' [default: latest]. index, hash or 'latest' (default: {flag_to}).
--format FORMAT For import/export in the given format. FORMAT must be --format FORMAT For import/export in the given format. FORMAT must be
one of 'hex' or 'binary'. one of 'hex' or 'binary'.
(default: {flag_format:?} = Import: auto, Export: binary)
Snapshot Options: Snapshot Options:
--at BLOCK Take a snapshot at the given block, which may be an --at BLOCK Take a snapshot at the given block, which may be an
index, hash, or 'latest'. Note that taking snapshots at index, hash, or 'latest'. Note that taking snapshots at
non-recent blocks will only work with --pruning archive non-recent blocks will only work with --pruning archive
[default: latest] (default: {flag_at})
--no-periodic-snapshot Disable automated snapshots which usually occur once --no-periodic-snapshot Disable automated snapshots which usually occur once
every 10000 blocks. every 10000 blocks. (default: {flag_no_periodic_snapshot})
Virtual Machine Options: Virtual Machine Options:
--jitvm Enable the JIT VM. --jitvm Enable the JIT VM. (default: {flag_jitvm})
Legacy Options: Legacy Options:
--geth Run in Geth-compatibility mode. Sets the IPC path --geth Run in Geth-compatibility mode. Sets the IPC path
@ -284,156 +273,14 @@ Legacy Options:
--cache MB Equivalent to --cache-size MB. --cache MB Equivalent to --cache-size MB.
Miscellaneous Options: Miscellaneous Options:
-c --config CONFIG Specify a filename containing a configuration file.
(default: {flag_config})
-l --logging LOGGING Specify the logging level. Must conform to the same -l --logging LOGGING Specify the logging level. Must conform to the same
format as RUST_LOG. format as RUST_LOG. (default: {flag_logging:?})
--log-file FILENAME Specify a filename into which logging should be --log-file FILENAME Specify a filename into which logging should be
directed. directed. (default: {flag_log_file:?})
--no-color Don't use terminal color codes in output. --no-config Don't load a configuration file.
--no-color Don't use terminal color codes in output. (default: {flag_no_color})
-v --version Show information about version. -v --version Show information about version.
-h --help Show this screen. -h --help Show this screen.
"#;
#[derive(Debug, PartialEq, RustcDecodable)]
pub struct Args {
pub cmd_daemon: bool,
pub cmd_account: bool,
pub cmd_wallet: bool,
pub cmd_new: bool,
pub cmd_list: bool,
pub cmd_export: bool,
pub cmd_import: bool,
pub cmd_signer: bool,
pub cmd_new_token: bool,
pub cmd_snapshot: bool,
pub cmd_restore: bool,
pub cmd_ui: bool,
pub arg_pid_file: String,
pub arg_file: Option<String>,
pub arg_path: Vec<String>,
pub flag_mode: String,
pub flag_mode_timeout: u64,
pub flag_mode_alarm: u64,
pub flag_chain: String,
pub flag_db_path: String,
pub flag_identity: String,
pub flag_unlock: Option<String>,
pub flag_password: Vec<String>,
pub flag_keys_path: String,
pub flag_keys_iterations: u32,
pub flag_import_geth_keys: bool,
pub flag_bootnodes: Option<String>,
pub flag_network_id: Option<String>,
pub flag_pruning: String,
pub flag_tracing: String,
pub flag_port: u16,
pub flag_min_peers: u16,
pub flag_max_peers: u16,
pub flag_no_discovery: bool,
pub flag_nat: String,
pub flag_node_key: Option<String>,
pub flag_reserved_peers: Option<String>,
pub flag_reserved_only: bool,
pub flag_cache_size_db: u32,
pub flag_cache_size_blocks: u32,
pub flag_cache_size_queue: u32,
pub flag_cache_size: Option<u32>,
pub flag_cache: Option<u32>,
pub flag_fast_and_loose: bool,
pub flag_no_jsonrpc: bool,
pub flag_jsonrpc_interface: String,
pub flag_jsonrpc_port: u16,
pub flag_jsonrpc_cors: Option<String>,
pub flag_jsonrpc_hosts: String,
pub flag_jsonrpc_apis: String,
pub flag_no_ipc: bool,
pub flag_ipc_path: String,
pub flag_ipc_apis: String,
pub flag_no_dapps: bool,
pub flag_dapps_port: u16,
pub flag_dapps_interface: String,
pub flag_dapps_hosts: String,
pub flag_dapps_user: Option<String>,
pub flag_dapps_pass: Option<String>,
pub flag_dapps_path: String,
pub flag_force_signer: bool,
pub flag_no_signer: bool,
pub flag_signer_port: u16,
pub flag_signer_interface: String,
pub flag_signer_path: String,
pub flag_signer_no_validation: bool,
pub flag_force_sealing: bool,
pub flag_reseal_on_txs: String,
pub flag_reseal_min_period: u64,
pub flag_work_queue_size: usize,
pub flag_remove_solved: bool,
pub flag_tx_gas_limit: Option<String>,
pub flag_relay_set: String,
pub flag_author: Option<String>,
pub flag_usd_per_tx: String,
pub flag_usd_per_eth: String,
pub flag_price_update_period: String,
pub flag_gas_floor_target: String,
pub flag_gas_cap: String,
pub flag_extra_data: Option<String>,
pub flag_tx_queue_size: usize,
pub flag_notify_work: Option<String>,
pub flag_logging: Option<String>,
pub flag_version: bool,
pub flag_from: String,
pub flag_to: String,
pub flag_at: String,
pub flag_no_periodic_snapshot: bool,
pub flag_format: Option<String>,
pub flag_jitvm: bool,
pub flag_log_file: Option<String>,
pub flag_no_color: bool,
pub flag_no_network: bool,
// legacy...
pub flag_geth: bool,
pub flag_nodekey: Option<String>,
pub flag_nodiscover: bool,
pub flag_peers: Option<u16>,
pub flag_datadir: Option<String>,
pub flag_extradata: Option<String>,
pub flag_etherbase: Option<String>,
pub flag_gasprice: Option<String>,
pub flag_jsonrpc: bool,
pub flag_webapp: bool,
pub flag_rpc: bool,
pub flag_rpcaddr: Option<String>,
pub flag_rpcport: Option<u16>,
pub flag_rpccorsdomain: Option<String>,
pub flag_rpcapi: Option<String>,
pub flag_testnet: bool,
pub flag_networkid: Option<String>,
pub flag_ipcdisable: bool,
pub flag_ipc_off: bool,
pub flag_jsonrpc_off: bool,
pub flag_dapps_off: bool,
pub flag_ipcpath: Option<String>,
pub flag_ipcapi: Option<String>,
pub flag_db_compaction: String,
pub flag_fat_db: bool,
}
impl Default for Args {
fn default() -> Self {
Docopt::new(USAGE).unwrap().argv(&[] as &[&str]).decode().unwrap()
}
}
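The Default impl above decodes an empty argv; decoding the real command line with the same docopt machinery looks roughly like the sketch below, modelled on the removed Configuration::parse shown further down (error handling simplified, not a new API):

// Sketch: decode the process arguments into Args via docopt, exiting with
// docopt's own error/usage message on failure.
fn parse_args() -> Args {
    Docopt::new(USAGE)
        .and_then(|d| d.argv(std::env::args()).decode())
        .unwrap_or_else(|e| e.exit())
}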
pub fn print_version() -> String {
format!("\
Parity
version {}
Copyright 2015, 2016 Ethcore (UK) Limited
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
", version())
}

9
parity/cli/version.txt Normal file
View File

@ -0,0 +1,9 @@
Parity
version {}
Copyright 2015, 2016 Ethcore (UK) Limited
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
By Wood/Paronyan/Kotewicz/Drwięga/Volf.
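The template above carries the same text that print_version formatted inline; one plausible way to consume such a file (assumed here, the diff does not show it) is to embed it at compile time and substitute the version string:

// Assumed sketch, not the actual Parity code: embed version.txt at compile time
// and fill in its single `{}` placeholder with the running version string.
const VERSION_TEMPLATE: &'static str = include_str!("version.txt");

pub fn print_version(version: &str) -> String {
    VERSION_TEMPLATE.replace("{}", version)
}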

View File

@ -19,8 +19,7 @@ use std::io::Read;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::PathBuf; use std::path::PathBuf;
use std::cmp::max; use std::cmp::max;
use cli::{USAGE, Args}; use cli::{Args, ArgsError};
use docopt::{Docopt, Error as DocoptError};
use util::{Hashable, U256, Uint, Bytes, version_data, Secret, Address}; use util::{Hashable, U256, Uint, Bytes, version_data, Secret, Address};
use util::log::Colour; use util::log::Colour;
use ethsync::{NetworkConfiguration, is_valid_node_url}; use ethsync::{NetworkConfiguration, is_valid_node_url};
@ -60,8 +59,8 @@ pub struct Configuration {
} }
impl Configuration { impl Configuration {
pub fn parse<S, I>(command: I) -> Result<Self, DocoptError> where I: IntoIterator<Item=S>, S: AsRef<str> { pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
let args = try!(Docopt::new(USAGE).and_then(|d| d.argv(command).decode())); let args = try!(Args::parse(command));
let config = Configuration { let config = Configuration {
args: args, args: args,
@ -628,8 +627,7 @@ impl Configuration {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use cli::USAGE; use cli::Args;
use docopt::Docopt;
use ethcore_rpc::NetworkSettings; use ethcore_rpc::NetworkSettings;
use ethcore::client::{VMType, BlockID}; use ethcore::client::{VMType, BlockID};
use helpers::{replace_home, default_network_config}; use helpers::{replace_home, default_network_config};
@ -647,21 +645,21 @@ mod tests {
fn parse(args: &[&str]) -> Configuration { fn parse(args: &[&str]) -> Configuration {
Configuration { Configuration {
args: Docopt::new(USAGE).unwrap().argv(args).decode().unwrap(), args: Args::parse_without_config(args).unwrap(),
} }
} }
#[test] #[test]
fn test_command_version() { fn test_command_version() {
let args = vec!["parity", "--version"]; let args = vec!["parity", "--version"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Version); assert_eq!(conf.into_command().unwrap(), Cmd::Version);
} }
#[test] #[test]
fn test_command_account_new() { fn test_command_account_new() {
let args = vec!["parity", "account", "new"]; let args = vec!["parity", "account", "new"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::New(NewAccount { assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::New(NewAccount {
iterations: 10240, iterations: 10240,
path: replace_home("$HOME/.parity/keys"), path: replace_home("$HOME/.parity/keys"),
@ -672,7 +670,7 @@ mod tests {
#[test] #[test]
fn test_command_account_list() { fn test_command_account_list() {
let args = vec!["parity", "account", "list"]; let args = vec!["parity", "account", "list"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Account( assert_eq!(conf.into_command().unwrap(), Cmd::Account(
AccountCmd::List(replace_home("$HOME/.parity/keys"))) AccountCmd::List(replace_home("$HOME/.parity/keys")))
); );
@ -681,7 +679,7 @@ mod tests {
#[test] #[test]
fn test_command_account_import() { fn test_command_account_import() {
let args = vec!["parity", "account", "import", "my_dir", "another_dir"]; let args = vec!["parity", "account", "import", "my_dir", "another_dir"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::Import(ImportAccounts { assert_eq!(conf.into_command().unwrap(), Cmd::Account(AccountCmd::Import(ImportAccounts {
from: vec!["my_dir".into(), "another_dir".into()], from: vec!["my_dir".into(), "another_dir".into()],
to: replace_home("$HOME/.parity/keys"), to: replace_home("$HOME/.parity/keys"),
@ -691,7 +689,7 @@ mod tests {
#[test] #[test]
fn test_command_wallet_import() { fn test_command_wallet_import() {
let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"]; let args = vec!["parity", "wallet", "import", "my_wallet.json", "--password", "pwd"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::ImportPresaleWallet(ImportWallet { assert_eq!(conf.into_command().unwrap(), Cmd::ImportPresaleWallet(ImportWallet {
iterations: 10240, iterations: 10240,
path: replace_home("$HOME/.parity/keys"), path: replace_home("$HOME/.parity/keys"),
@ -703,7 +701,7 @@ mod tests {
#[test] #[test]
fn test_command_blockchain_import() { fn test_command_blockchain_import() {
let args = vec!["parity", "import", "blockchain.json"]; let args = vec!["parity", "import", "blockchain.json"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain { assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Import(ImportBlockchain {
spec: Default::default(), spec: Default::default(),
logger_config: Default::default(), logger_config: Default::default(),
@ -723,7 +721,7 @@ mod tests {
#[test] #[test]
fn test_command_blockchain_export() { fn test_command_blockchain_export() {
let args = vec!["parity", "export", "blockchain.json"]; let args = vec!["parity", "export", "blockchain.json"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain {
spec: Default::default(), spec: Default::default(),
logger_config: Default::default(), logger_config: Default::default(),
@ -744,7 +742,7 @@ mod tests {
#[test] #[test]
fn test_command_blockchain_export_with_custom_format() { fn test_command_blockchain_export_with_custom_format() {
let args = vec!["parity", "export", "--format", "hex", "blockchain.json"]; let args = vec!["parity", "export", "--format", "hex", "blockchain.json"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { assert_eq!(conf.into_command().unwrap(), Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain {
spec: Default::default(), spec: Default::default(),
logger_config: Default::default(), logger_config: Default::default(),
@ -765,7 +763,7 @@ mod tests {
#[test] #[test]
fn test_command_signer_new_token() { fn test_command_signer_new_token() {
let args = vec!["parity", "signer", "new-token"]; let args = vec!["parity", "signer", "new-token"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
let expected = replace_home("$HOME/.parity/signer"); let expected = replace_home("$HOME/.parity/signer");
assert_eq!(conf.into_command().unwrap(), Cmd::SignerToken(expected)); assert_eq!(conf.into_command().unwrap(), Cmd::SignerToken(expected));
} }
@ -773,7 +771,7 @@ mod tests {
#[test] #[test]
fn test_run_cmd() { fn test_run_cmd() {
let args = vec!["parity"]; let args = vec!["parity"];
let conf = Configuration::parse(args).unwrap(); let conf = parse(&args);
assert_eq!(conf.into_command().unwrap(), Cmd::Run(RunCmd { assert_eq!(conf.into_command().unwrap(), Cmd::Run(RunCmd {
cache_config: Default::default(), cache_config: Default::default(),
dirs: Default::default(), dirs: Default::default(),
@ -962,7 +960,7 @@ mod tests {
let filename = temp.as_str().to_owned() + "/peers"; let filename = temp.as_str().to_owned() + "/peers";
File::create(filename.clone()).unwrap().write_all(b" \n\t\n").unwrap(); File::create(filename.clone()).unwrap().write_all(b" \n\t\n").unwrap();
let args = vec!["parity", "--reserved-peers", &filename]; let args = vec!["parity", "--reserved-peers", &filename];
let conf = Configuration::parse(args).unwrap(); let conf = Configuration::parse(&args).unwrap();
assert!(conf.init_reserved_nodes().is_ok()); assert!(conf.init_reserved_nodes().is_ok());
} }
} }

View File

@ -41,7 +41,9 @@ pub struct Informant {
skipped: AtomicUsize, skipped: AtomicUsize,
} }
trait MillisecondDuration { /// Something that can be converted to milliseconds.
pub trait MillisecondDuration {
/// Get the value in milliseconds.
fn as_milliseconds(&self) -> u64; fn as_milliseconds(&self) -> u64;
} }
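With the trait now public and documented, a plausible implementor (an assumption here; this hunk does not show it) is the time crate's Duration that the informant already works with:

// Assumed example impl for the now-public trait. `time::Duration` exposes
// num_milliseconds() as an i64, which this sketch simply truncates to u64.
use time::Duration;

impl MillisecondDuration for Duration {
    fn as_milliseconds(&self) -> u64 {
        self.num_milliseconds() as u64
    }
}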

View File

@ -51,6 +51,7 @@ extern crate ansi_term;
extern crate regex; extern crate regex;
extern crate isatty; extern crate isatty;
extern crate toml;
#[macro_use] #[macro_use]
extern crate ethcore_util as util; extern crate ethcore_util as util;
@ -110,7 +111,7 @@ mod boot;
mod stratum; mod stratum;
use std::{process, env}; use std::{process, env};
use cli::print_version; use cli::Args;
use configuration::{Cmd, Configuration}; use configuration::{Cmd, Configuration};
use deprecated::find_deprecated; use deprecated::find_deprecated;
@ -120,7 +121,7 @@ fn execute(command: Cmd) -> Result<String, String> {
try!(run::execute(run_cmd)); try!(run::execute(run_cmd));
Ok("".into()) Ok("".into())
}, },
Cmd::Version => Ok(print_version()), Cmd::Version => Ok(Args::print_version()),
Cmd::Account(account_cmd) => account::execute(account_cmd), Cmd::Account(account_cmd) => account::execute(account_cmd),
Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd), Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd),
Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd), Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd),
@ -130,7 +131,8 @@ fn execute(command: Cmd) -> Result<String, String> {
} }
fn start() -> Result<String, String> { fn start() -> Result<String, String> {
let conf = Configuration::parse(env::args()).unwrap_or_else(|e| e.exit()); let args: Vec<String> = env::args().collect();
let conf = Configuration::parse(&args).unwrap_or_else(|e| e.exit());
let deprecated = find_deprecated(&conf.args); let deprecated = find_deprecated(&conf.args);
for d in deprecated { for d in deprecated {

View File

@ -27,7 +27,7 @@ rlp = { path = "../util/rlp" }
rustc-serialize = "0.3" rustc-serialize = "0.3"
transient-hashmap = "0.1" transient-hashmap = "0.1"
serde_macros = { version = "0.8.0", optional = true } serde_macros = { version = "0.8.0", optional = true }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc = { path = "../ipc/rpc" }
time = "0.1" time = "0.1"

View File

@ -28,7 +28,8 @@ pub fn expect_no_params(params: Params) -> Result<(), Error> {
} }
} }
fn params_len(params: &Params) -> usize { /// Returns the number of parameters in the given `Params` object.
pub fn params_len(params: &Params) -> usize {
match params { match params {
&Params::Array(ref vec) => vec.len(), &Params::Array(ref vec) => vec.len(),
_ => 0, _ => 0,
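Making params_len public lets RPC handlers treat a trailing parameter as optional by branching on the parameter count; the eth_getLogs change further below applies it essentially like this (paraphrased from that hunk):

// Paraphrased from the eth_getLogs hunk below: one parameter means "filter only",
// otherwise expect (filter, limit) and wrap the limit in Some.
let parsed = match params_len(&params) {
    1 => from_params::<(Filter,)>(params).map(|(filter,)| (filter, None)),
    _ => from_params::<(Filter, usize)>(params).map(|(filter, limit)| (filter, Some(limit))),
};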

View File

@ -45,7 +45,7 @@ use v1::traits::Eth;
use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256}; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256};
use v1::helpers::{CallRequest as CRequest, errors}; use v1::helpers::{CallRequest as CRequest, errors};
use v1::helpers::dispatch::{default_gas_price, dispatch_transaction}; use v1::helpers::dispatch::{default_gas_price, dispatch_transaction};
use v1::helpers::params::{expect_no_params, from_params_default_second, from_params_default_third}; use v1::helpers::params::{expect_no_params, params_len, from_params_default_second, from_params_default_third};
/// Eth RPC options /// Eth RPC options
pub struct EthClientOptions { pub struct EthClientOptions {
@ -498,11 +498,14 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
fn logs(&self, params: Params) -> Result<Value, Error> { fn logs(&self, params: Params) -> Result<Value, Error> {
try!(self.active()); try!(self.active());
from_params::<(Filter,)>(params) let params = match params_len(&params) {
.and_then(|(filter,)| { 1 => from_params::<(Filter, )>(params).map(|(filter, )| (filter, None)),
_ => from_params::<(Filter, usize)>(params).map(|(filter, val)| (filter, Some(val))),
};
params.and_then(|(filter, limit)| {
let include_pending = filter.to_block == Some(BlockNumber::Pending); let include_pending = filter.to_block == Some(BlockNumber::Pending);
let filter: EthcoreFilter = filter.into(); let filter: EthcoreFilter = filter.into();
let mut logs = take_weak!(self.client).logs(filter.clone()) let mut logs = take_weak!(self.client).logs(filter.clone(), limit)
.into_iter() .into_iter()
.map(From::from) .map(From::from)
.collect::<Vec<Log>>(); .collect::<Vec<Log>>();
@ -512,6 +515,14 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
logs.extend(pending); logs.extend(pending);
} }
let len = logs.len();
match limit {
Some(limit) if len >= limit => {
logs = logs.split_off(len - limit);
},
_ => {},
}
Ok(to_value(&logs)) Ok(to_value(&logs))
}) })
} }
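The new limit handling above keeps only the most recent entries once more than limit logs have been collected. Restated as a standalone sketch of just the trimming step:

// Standalone restatement of the trimming logic above: when a limit is given and
// at least that many logs were collected, keep only the last `limit` entries.
fn trim_to_limit<T>(mut logs: Vec<T>, limit: Option<usize>) -> Vec<T> {
    let len = logs.len();
    match limit {
        Some(limit) if len >= limit => logs.split_off(len - limit),
        _ => logs,
    }
}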

View File

@ -152,7 +152,7 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
filter.to_block = BlockID::Latest; filter.to_block = BlockID::Latest;
// retrieve logs in range from_block..min(BlockID::Latest..to_block) // retrieve logs in range from_block..min(BlockID::Latest..to_block)
let mut logs = client.logs(filter.clone()) let mut logs = client.logs(filter.clone(), None)
.into_iter() .into_iter()
.map(From::from) .map(From::from)
.collect::<Vec<Log>>(); .collect::<Vec<Log>>();
@ -194,7 +194,7 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => {
let include_pending = filter.to_block == Some(BlockNumber::Pending); let include_pending = filter.to_block == Some(BlockNumber::Pending);
let filter: EthcoreFilter = filter.clone().into(); let filter: EthcoreFilter = filter.clone().into();
let mut logs = take_weak!(self.client).logs(filter.clone()) let mut logs = take_weak!(self.client).logs(filter.clone(), None)
.into_iter() .into_iter()
.map(From::from) .map(From::from)
.collect::<Vec<Log>>(); .collect::<Vec<Log>>();

View File

@ -149,6 +149,28 @@ fn rpc_eth_hashrate() {
assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned()));
} }
#[test]
fn rpc_eth_logs() {
let tester = EthTester::default();
let request = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;
assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned()));
}
#[test]
fn rpc_eth_logs_with_limit() {
let tester = EthTester::default();
let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}, 1], "id": 1}"#;
let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}, 0], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;
assert_eq!(tester.io.handle_request_sync(request1), Some(response.to_owned()));
assert_eq!(tester.io.handle_request_sync(request2), Some(response.to_owned()));
}
#[test] #[test]
fn rpc_eth_submit_hashrate() { fn rpc_eth_submit_hashrate() {
let tester = EthTester::default(); let tester = EthTester::default();

6
scripts/deploy.sh Normal file
View File

@ -0,0 +1,6 @@
#!/bin/bash
ll
ls
la
echo "list of biniries"
exit

View File

@ -1,10 +1,10 @@
#!/bin/bash #!/bin/bash
export TARGETS=" export TARGETS="
-p bigint\
-p rlp\ -p rlp\
-p ethash \ -p ethash \
-p ethcore \ -p ethcore \
-p ethcore-bigint\
-p ethcore-dapps \ -p ethcore-dapps \
-p ethcore-rpc \ -p ethcore-rpc \
-p ethcore-signer \ -p ethcore-signer \

View File

@ -22,7 +22,7 @@ ethcore-rpc = { path = "../rpc" }
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }
parity-dapps-signer = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true} parity-dapps-signer = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true}
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
[features] [features]
dev = ["clippy"] dev = ["clippy"]

View File

@ -127,7 +127,7 @@ impl Server {
// Spawn a thread with event loop // Spawn a thread with event loop
let handle = thread::spawn(move || { let handle = thread::spawn(move || {
ph.catch_panic(move || { ph.catch_panic(move || {
match ws.listen(addr.clone()).map_err(ServerError::from) { match ws.listen(addr).map_err(ServerError::from) {
Err(ServerError::IoError(io)) => die(format!( Err(ServerError::IoError(io)) => die(format!(
"Signer: Could not start listening on specified address. Make sure that no other instance is running on Signer's port. Details: {:?}", "Signer: Could not start listening on specified address. Make sure that no other instance is running on Signer's port. Details: {:?}",
io io
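The dropped addr.clone() above is possible because SocketAddr is Copy; a minimal illustration (not Parity code):

// SocketAddr implements Copy, so it can be passed by value repeatedly without
// an explicit clone. Illustrative only.
use std::net::SocketAddr;

fn listen_on(addr: SocketAddr) { let _ = addr; }

fn demo(addr: SocketAddr) {
    listen_on(addr); // copied in
    listen_on(addr); // addr is still usable afterwards, no clone needed
}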

View File

@ -17,7 +17,7 @@ ethcore-network = { path = "../util/network" }
ethcore-io = { path = "../util/io" } ethcore-io = { path = "../util/io" }
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
rlp = { path = "../util/rlp" } rlp = { path = "../util/rlp" }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
log = "0.3" log = "0.3"
env_logger = "0.3" env_logger = "0.3"
time = "0.1.34" time = "0.1.34"

View File

@ -807,7 +807,7 @@ impl ChainSync {
} }
let manifest_rlp = try!(r.at(0)); let manifest_rlp = try!(r.at(0));
let manifest = match ManifestData::from_rlp(&manifest_rlp.as_raw()) { let manifest = match ManifestData::from_rlp(manifest_rlp.as_raw()) {
Err(e) => { Err(e) => {
trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e); trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e);
io.disconnect_peer(peer_id); io.disconnect_peer(peer_id);
@ -995,7 +995,7 @@ impl ChainSync {
self.request_snapshot_data(io, peer_id); self.request_snapshot_data(io, peer_id);
} }
}, },
SyncState::SnapshotManifest => (), //already downloading from other peer SyncState::SnapshotManifest | //already downloading from other peer
SyncState::Waiting | SyncState::SnapshotWaiting => () SyncState::Waiting | SyncState::SnapshotWaiting => ()
} }
} }
@ -1439,8 +1439,7 @@ impl ChainSync {
}, },
None => { None => {
trace!(target: "sync", "{}: No manifest to return", peer_id); trace!(target: "sync", "{}: No manifest to return", peer_id);
let rlp = RlpStream::new_list(0); RlpStream::new_list(0)
rlp
} }
}; };
Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp))) Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp)))
@ -1457,8 +1456,7 @@ impl ChainSync {
rlp rlp
}, },
None => { None => {
let rlp = RlpStream::new_list(0); RlpStream::new_list(0)
rlp
} }
}; };
Ok(Some((SNAPSHOT_DATA_PACKET, rlp))) Ok(Some((SNAPSHOT_DATA_PACKET, rlp)))
@ -1543,6 +1541,7 @@ impl ChainSync {
}) })
} }
#[cfg_attr(feature="dev", allow(match_same_arms))]
pub fn maintain_peers(&mut self, io: &mut SyncIo) { pub fn maintain_peers(&mut self, io: &mut SyncIo) {
let tick = time::precise_time_s(); let tick = time::precise_time_s();
let mut aborting = Vec::new(); let mut aborting = Vec::new();
@ -1712,7 +1711,7 @@ impl ChainSync {
return 0; return 0;
} }
let all_transactions_hashes = transactions.iter().map(|ref tx| tx.hash()).collect::<HashSet<H256>>(); let all_transactions_hashes = transactions.iter().map(|tx| tx.hash()).collect::<HashSet<H256>>();
let all_transactions_rlp = { let all_transactions_rlp = {
let mut packet = RlpStream::new_list(transactions.len()); let mut packet = RlpStream::new_list(transactions.len());
for tx in &transactions { packet.append(tx); } for tx in &transactions { packet.append(tx); }
@ -1862,8 +1861,8 @@ mod tests {
fn return_receipts_empty() { fn return_receipts_empty() {
let mut client = TestBlockChainClient::new(); let mut client = TestBlockChainClient::new();
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let io = TestIo::new(&mut client, &ss, &mut queue, None);
let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0); let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0);
@ -1875,8 +1874,8 @@ mod tests {
let mut client = TestBlockChainClient::new(); let mut client = TestBlockChainClient::new();
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let sync = dummy_sync_with_peer(H256::new(), &client); let sync = dummy_sync_with_peer(H256::new(), &client);
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut receipt_list = RlpStream::new_list(4); let mut receipt_list = RlpStream::new_list(4);
receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
@ -1931,8 +1930,8 @@ mod tests {
let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect();
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let io = TestIo::new(&mut client, &ss, &mut queue, None);
let unknown: H256 = H256::new(); let unknown: H256 = H256::new();
let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0);
@ -1970,8 +1969,8 @@ mod tests {
let mut client = TestBlockChainClient::new(); let mut client = TestBlockChainClient::new();
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let sync = dummy_sync_with_peer(H256::new(), &client); let sync = dummy_sync_with_peer(H256::new(), &client);
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut node_list = RlpStream::new_list(3); let mut node_list = RlpStream::new_list(3);
node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
@ -2026,8 +2025,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client);
let chain_info = client.chain_info(); let chain_info = client.chain_info();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let io = TestIo::new(&mut client, &mut ss, &mut queue, None); let io = TestIo::new(&mut client, &ss, &mut queue, None);
let lagging_peers = sync.get_lagging_peers(&chain_info, &io); let lagging_peers = sync.get_lagging_peers(&chain_info, &io);
@ -2058,8 +2057,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info(); let chain_info = client.chain_info();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io); let peers = sync.get_lagging_peers(&chain_info, &io);
let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers);
@ -2079,8 +2078,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info(); let chain_info = client.chain_info();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io); let peers = sync.get_lagging_peers(&chain_info, &io);
let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers);
@ -2100,8 +2099,8 @@ mod tests {
let hash = client.block_hash(BlockID::Number(99)).unwrap(); let hash = client.block_hash(BlockID::Number(99)).unwrap();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info(); let chain_info = client.chain_info();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io); let peers = sync.get_lagging_peers(&chain_info, &io);
let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers);
@ -2120,8 +2119,8 @@ mod tests {
client.insert_transaction_to_queue(); client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peer_count = sync.propagate_new_transactions(&mut io); let peer_count = sync.propagate_new_transactions(&mut io);
// Try to propagate same transactions for the second time // Try to propagate same transactions for the second time
let peer_count2 = sync.propagate_new_transactions(&mut io); let peer_count2 = sync.propagate_new_transactions(&mut io);
@ -2142,8 +2141,8 @@ mod tests {
client.insert_transaction_to_queue(); client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peer_count = sync.propagate_new_transactions(&mut io); let peer_count = sync.propagate_new_transactions(&mut io);
sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]); sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]);
// Try to propagate same transactions for the second time // Try to propagate same transactions for the second time
@ -2166,17 +2165,17 @@ mod tests {
client.insert_transaction_to_queue(); client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
// should send some // should send some
{ {
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peer_count = sync.propagate_new_transactions(&mut io); let peer_count = sync.propagate_new_transactions(&mut io);
assert_eq!(1, io.queue.len()); assert_eq!(1, io.queue.len());
assert_eq!(1, peer_count); assert_eq!(1, peer_count);
} }
// Insert some more // Insert some more
client.insert_transaction_to_queue(); client.insert_transaction_to_queue();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
// Propagate new transactions // Propagate new transactions
let peer_count2 = sync.propagate_new_transactions(&mut io); let peer_count2 = sync.propagate_new_transactions(&mut io);
// And now the peer should have all transactions // And now the peer should have all transactions
@ -2202,8 +2201,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
//sync.have_common_block = true; //sync.have_common_block = true;
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let block = UntrustedRlp::new(&block_data); let block = UntrustedRlp::new(&block_data);
@ -2221,8 +2220,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let block = UntrustedRlp::new(&block_data); let block = UntrustedRlp::new(&block_data);
@ -2237,8 +2236,8 @@ mod tests {
client.add_blocks(10, EachBlockWith::Uncle); client.add_blocks(10, EachBlockWith::Uncle);
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let empty_data = vec![]; let empty_data = vec![];
let block = UntrustedRlp::new(&empty_data); let block = UntrustedRlp::new(&empty_data);
@ -2254,8 +2253,8 @@ mod tests {
client.add_blocks(10, EachBlockWith::Uncle); client.add_blocks(10, EachBlockWith::Uncle);
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let hashes_data = get_dummy_hashes(); let hashes_data = get_dummy_hashes();
let hashes_rlp = UntrustedRlp::new(&hashes_data); let hashes_rlp = UntrustedRlp::new(&hashes_data);
@ -2271,8 +2270,8 @@ mod tests {
client.add_blocks(10, EachBlockWith::Uncle); client.add_blocks(10, EachBlockWith::Uncle);
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let empty_hashes_data = vec![]; let empty_hashes_data = vec![];
let hashes_rlp = UntrustedRlp::new(&empty_hashes_data); let hashes_rlp = UntrustedRlp::new(&empty_hashes_data);
@ -2291,8 +2290,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info(); let chain_info = client.chain_info();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io); let peers = sync.get_lagging_peers(&chain_info, &io);
sync.propagate_new_hashes(&chain_info, &mut io, &peers); sync.propagate_new_hashes(&chain_info, &mut io, &peers);
@ -2311,8 +2310,8 @@ mod tests {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info(); let chain_info = client.chain_info();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io); let peers = sync.get_lagging_peers(&chain_info, &io);
sync.propagate_blocks(&chain_info, &mut io, &[], &peers); sync.propagate_blocks(&chain_info, &mut io, &[], &peers);
@ -2346,8 +2345,8 @@ mod tests {
// when // when
{ {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks);
sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0); assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
@ -2361,8 +2360,8 @@ mod tests {
} }
{ {
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks); io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks);
sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]); sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
} }
@ -2386,8 +2385,8 @@ mod tests {
let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let retracted_blocks = vec![client.block_hash_delta_minus(1)];
let mut queue = VecDeque::new(); let mut queue = VecDeque::new();
let mut ss = TestSnapshotService::new(); let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &mut ss, &mut queue, None); let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
// when // when
sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]); sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);

View File

@ -74,14 +74,14 @@ impl SnapshotService for TestSnapshotService {
} }
fn status(&self) -> RestorationStatus { fn status(&self) -> RestorationStatus {
match &*self.restoration_manifest.lock() { match *self.restoration_manifest.lock() {
&Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() && Some(ref manifest) if self.state_restoration_chunks.lock().len() == manifest.state_hashes.len() &&
self.block_restoration_chunks.lock().len() == manifest.block_hashes.len() => RestorationStatus::Inactive, self.block_restoration_chunks.lock().len() == manifest.block_hashes.len() => RestorationStatus::Inactive,
&Some(_) => RestorationStatus::Ongoing { Some(_) => RestorationStatus::Ongoing {
state_chunks_done: self.state_restoration_chunks.lock().len() as u32, state_chunks_done: self.state_restoration_chunks.lock().len() as u32,
block_chunks_done: self.block_restoration_chunks.lock().len() as u32, block_chunks_done: self.block_restoration_chunks.lock().len() as u32,
}, },
&None => RestorationStatus::Inactive, None => RestorationStatus::Inactive,
} }
} }
@ -98,13 +98,13 @@ impl SnapshotService for TestSnapshotService {
} }
fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { fn restore_state_chunk(&self, hash: H256, chunk: Bytes) {
if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.state_hashes.iter().any(|h| h == &hash)) { if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.state_hashes.iter().any(|h| h == &hash)) {
self.state_restoration_chunks.lock().insert(hash, chunk); self.state_restoration_chunks.lock().insert(hash, chunk);
} }
} }
fn restore_block_chunk(&self, hash: H256, chunk: Bytes) { fn restore_block_chunk(&self, hash: H256, chunk: Bytes) {
if self.restoration_manifest.lock().as_ref().map_or(false, |ref m| m.block_hashes.iter().any(|h| h == &hash)) { if self.restoration_manifest.lock().as_ref().map_or(false, |m| m.block_hashes.iter().any(|h| h == &hash)) {
self.block_restoration_chunks.lock().insert(hash, chunk); self.block_restoration_chunks.lock().insert(hash, chunk);
} }
} }
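Several hunks in this file, and in the kvdb.rs changes further below, replace match &*lock() with match *lock() and drop the leading & from each pattern. A minimal illustration outside Parity (std Mutex instead of parking_lot):

// Matching on the dereferenced guard lets the patterns omit the `&`; binding the
// inner value with `ref` avoids moving it out of the guard.
use std::sync::Mutex;

fn value_len(slot: &Mutex<Option<String>>) -> usize {
    match *slot.lock().unwrap() {
        Some(ref value) => value.len(),
        None => 0,
    }
}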

View File

@ -23,12 +23,12 @@ rlp = { path = "rlp" }
heapsize = { version = "0.3", features = ["unstable"] } heapsize = { version = "0.3", features = ["unstable"] }
itertools = "0.4" itertools = "0.4"
sha3 = { path = "sha3" } sha3 = { path = "sha3" }
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
ethcore-devtools = { path = "../devtools" } ethcore-devtools = { path = "../devtools" }
libc = "0.2.7" libc = "0.2.7"
vergen = "0.1" vergen = "0.1"
target_info = "0.1" target_info = "0.1"
bigint = { path = "bigint" } ethcore-bigint = { path = "bigint" }
parking_lot = "0.2.6" parking_lot = "0.2.6"
using_queue = { path = "using_queue" } using_queue = { path = "using_queue" }
table = { path = "table" } table = { path = "table" }

View File

@ -1,8 +1,9 @@
[package] [package]
description = "Rust-assembler implementation of big integers arithmetic" description = "Large fixed-size integers and hash function outputs"
homepage = "http://ethcore.io" homepage = "http://ethcore.io"
repository = "https://github.com/ethcore/parity"
license = "GPL-3.0" license = "GPL-3.0"
name = "bigint" name = "ethcore-bigint"
version = "0.1.0" version = "0.1.0"
authors = ["Ethcore <admin@ethcore.io>"] authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs" build = "build.rs"

View File

@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Efficient large, fixed-size big integers and hashes.
#![cfg_attr(asm_available, feature(asm))] #![cfg_attr(asm_available, feature(asm))]
extern crate rand; extern crate rand;

View File

@ -30,11 +30,12 @@
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. // If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
// //
//! Big unsigned integer types //! Big unsigned integer types.
//! //!
//! Implementation of various large-but-fixed-size unsigned integer types. //! Implementation of various large-but-fixed-size unsigned integer types.
//! The functions here are designed to be fast. //! The functions here are designed to be fast. There are optional `x86_64`
//! //! implementations for even more speed, hidden behind the `x64_arithmetic`
//! feature flag.
use std::{mem, fmt}; use std::{mem, fmt};
use std::str::{FromStr}; use std::str::{FromStr};

View File

@ -14,7 +14,7 @@ time = "0.1.34"
tiny-keccak = "1.0" tiny-keccak = "1.0"
rust-crypto = "0.2.34" rust-crypto = "0.2.34"
slab = "0.2" slab = "0.2"
clippy = { version = "0.0.85", optional = true} clippy = { version = "0.0.90", optional = true}
igd = "0.5.0" igd = "0.5.0"
libc = "0.2.7" libc = "0.2.7"
parking_lot = "0.2.6" parking_lot = "0.2.6"

View File

@ -7,6 +7,6 @@ authors = ["Ethcore <admin@ethcore.io>"]
[dependencies] [dependencies]
elastic-array = "0.5" elastic-array = "0.5"
bigint = { path = "../bigint" } ethcore-bigint = { path = "../bigint" }
lazy_static = "0.2" lazy_static = "0.2"
rustc-serialize = "0.3" rustc-serialize = "0.3"

View File

@ -65,7 +65,7 @@ pub use self::rlpin::{Rlp, RlpIterator};
pub use self::rlpstream::RlpStream; pub use self::rlpstream::RlpStream;
pub use self::rlpcompression::RlpType; pub use self::rlpcompression::RlpType;
extern crate bigint; extern crate ethcore_bigint as bigint;
extern crate elastic_array; extern crate elastic_array;
extern crate rustc_serialize; extern crate rustc_serialize;

View File

@@ -330,8 +330,8 @@ impl Database {
 
 	/// Commit buffered changes to database.
 	pub fn flush(&self) -> Result<(), String> {
-		match &*self.db.read() {
-			&Some(DBAndColumns { ref db, ref cfs }) => {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
 				let batch = WriteBatch::new();
 				let mut overlay = self.overlay.write();
@@ -366,15 +366,15 @@ impl Database {
 				}
 				db.write_opt(batch, &self.write_opts)
 			},
-			&None => Err("Database is closed".to_owned())
+			None => Err("Database is closed".to_owned())
 		}
 	}
 
 	/// Commit transaction to database.
 	pub fn write(&self, tr: DBTransaction) -> Result<(), String> {
-		match &*self.db.read() {
-			&Some(DBAndColumns { ref db, ref cfs }) => {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
 				let batch = WriteBatch::new();
 				let ops = tr.ops;
 				for op in ops {
@@ -393,14 +393,14 @@ impl Database {
 				}
 				db.write_opt(batch, &self.write_opts)
 			},
-			&None => Err("Database is closed".to_owned())
+			None => Err("Database is closed".to_owned())
 		}
 	}
 
 	/// Get value by key.
 	pub fn get(&self, col: Option<u32>, key: &[u8]) -> Result<Option<Bytes>, String> {
-		match &*self.db.read() {
-			&Some(DBAndColumns { ref db, ref cfs }) => {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
 				let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
 				match overlay.get(key) {
 					Some(&KeyState::Insert(ref value)) | Some(&KeyState::InsertCompressed(ref value)) => Ok(Some(value.clone())),
@@ -412,15 +412,15 @@ impl Database {
 					},
 				}
 			},
-			&None => Ok(None),
+			None => Ok(None),
 		}
 	}
 
 	/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
 	// TODO: support prefix seek for unflushed data
 	pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
-		match &*self.db.read() {
-			&Some(DBAndColumns { ref db, ref cfs }) => {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
 				let mut iter = col.map_or_else(|| db.iterator(IteratorMode::From(prefix, Direction::Forward)),
 					|c| db.iterator_cf(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward)).unwrap());
 				match iter.next() {
@@ -429,19 +429,19 @@ impl Database {
 					_ => None
 				}
 			},
-			&None => None,
+			None => None,
 		}
 	}
 
 	/// Get database iterator for flushed data.
 	pub fn iter(&self, col: Option<u32>) -> DatabaseIterator {
 		//TODO: iterate over overlay
-		match &*self.db.read() {
-			&Some(DBAndColumns { ref db, ref cfs }) => {
+		match *self.db.read() {
+			Some(DBAndColumns { ref db, ref cfs }) => {
 				col.map_or_else(|| DatabaseIterator { iter: db.iterator(IteratorMode::Start) },
 					|c| DatabaseIterator { iter: db.iterator_cf(cfs[c as usize], IteratorMode::Start).unwrap() })
 			},
-			&None => panic!("Not supported yet") //TODO: return an empty iterator or change return type
+			None => panic!("Not supported yet") //TODO: return an empty iterator or change return type
 		}
 	}
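Note: every hunk in this file makes the same mechanical change: instead of matching a borrowed `&Option<...>` with `&Some(..)`/`&None` patterns, the lock guard is dereferenced once and the arm binds by `ref`. A minimal standalone sketch of the two styles, using plain `std::sync::RwLock` rather than Parity's actual types:

    // Minimal sketch (std types, not Parity's) of the adopted match style:
    // dereference the guard and bind the payload with `ref`.
    use std::sync::RwLock;

    struct Db { name: String }

    fn describe(db: &RwLock<Option<Db>>) -> Result<String, String> {
        // old style: match &*db.read().unwrap() { &Some(ref d) => ..., &None => ... }
        match *db.read().unwrap() {
            Some(ref inner) => Ok(format!("open: {}", inner.name)),
            None => Err("Database is closed".to_owned()),
        }
    }

    fn main() {
        let db = RwLock::new(Some(Db { name: "test".into() }));
        println!("{:?}", describe(&db));
        *db.write().unwrap() = None;
        println!("{:?}", describe(&db));
    }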

View File

@@ -99,7 +99,7 @@ extern crate time;
 extern crate ethcore_devtools as devtools;
 extern crate libc;
 extern crate target_info;
-extern crate bigint;
+extern crate ethcore_bigint as bigint;
 extern crate parking_lot;
 extern crate ansi_term;
 extern crate tiny_keccak;

View File

@@ -63,6 +63,12 @@ pub struct BasicRecorder {
 	min_depth: u32,
 }
 
+impl Default for BasicRecorder {
+	fn default() -> Self {
+		BasicRecorder::new()
+	}
+}
+
 impl BasicRecorder {
 	/// Create a new `BasicRecorder` which records all given nodes.
 	#[inline]
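Note: a `Default` impl that just delegates to `new()` is the usual way to satisfy clippy's `new_without_default` lint, which is plausibly the motivation here given the clippy version bump elsewhere in this commit. Standalone sketch of the pattern (hypothetical type, not the real recorder):

    // A type with `new()` also implements `Default` by delegating to it,
    // so `T::default()` and generic `Default`-bounded code both work.
    struct Recorder {
        min_depth: u32,
    }

    impl Recorder {
        fn new() -> Self {
            Recorder { min_depth: 0 }
        }
    }

    impl Default for Recorder {
        fn default() -> Self {
            Recorder::new()
        }
    }

    fn main() {
        assert_eq!(Recorder::default().min_depth, Recorder::new().min_depth);
    }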

View File

@@ -128,7 +128,7 @@ impl<'db> TrieDB<'db> {
 	}
 
 	/// Get the root node's RLP.
-	fn root_node<'a, R: 'a + Recorder>(&self, r: &'a mut R) -> super::Result<Node> {
+	fn root_node<R: Recorder>(&self, r: &mut R) -> super::Result<Node> {
 		self.root_data(r).map(Node::decoded)
 	}
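Note: the dropped `'a` lifetime and `R: 'a` bound were redundant because the recorder borrow never escapes the call; plain elision expresses the same thing. Self-contained sketch of the equivalent simplification (hypothetical trait and types, for illustration only):

    // The named lifetime adds nothing when the mutable borrow is only used
    // inside the function body; the elided form is equivalent here.
    trait Recorder {
        fn record(&mut self, data: &[u8]);
    }

    struct CountingRecorder { records: usize }

    impl Recorder for CountingRecorder {
        fn record(&mut self, _data: &[u8]) { self.records += 1; }
    }

    // before: fn root_node<'a, R: 'a + Recorder>(r: &'a mut R) { ... }
    fn root_node<R: Recorder>(r: &mut R) {
        r.record(b"root");
    }

    fn main() {
        let mut rec = CountingRecorder { records: 0 };
        root_node(&mut rec);
        assert_eq!(rec.records, 1);
    }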