Merge remote-tracking branch 'parity/master'

commit 5fac941738

.gitlab-ci.yml (106 lines changed)
@@ -1,7 +1,6 @@
 stages:
   - build
   - test
-  - deploy
 variables:
   GIT_DEPTH: "3"
   SIMPLECOV: "true"
@@ -18,18 +17,39 @@ linux-stable:
     - tags
     - stable
   script:
-    - export
     - cargo build --release --verbose
     - strip target/release/parity
-    - mkdir -p x86_64-unknown-linux-gnu/stable
-    - cp target/release/parity x86_64-unknown-linux-gnu/stable/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity
   tags:
     - rust
     - rust-stable
   artifacts:
     paths:
-    - x86_64-unknown-linux-gnu/stable/parity
+    - target/release/parity
     name: "stable-x86_64-unknown-linux-gnu_parity"
+linux-stable-14.04:
+  stage: build
+  image: ethcore/rust-14.04:latest
+  only:
+    - master
+    - beta
+    - tags
+    - stable
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity
+  tags:
+    - rust
+    - rust-14.04
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "stable-x86_64-unknown-ubuntu_14_04-gnu_parity"
 linux-beta:
   stage: build
   image: ethcore/rust:beta
@@ -39,17 +59,14 @@ linux-beta:
     - tags
     - stable
   script:
-    - export
     - cargo build --release --verbose
     - strip target/release/parity
-    - mkdir -p x86_64-unknown-linux-gnu/beta
-    - cp target/release/parity x86_64-unknown-linux-gnu/beta/parity
   tags:
     - rust
     - rust-beta
   artifacts:
     paths:
-    - x86_64-unknown-linux-gnu/beta/parity
+    - target/release/parity
     name: "beta-x86_64-unknown-linux-gnu_parity"
   allow_failure: true
 linux-nightly:
@@ -63,14 +80,12 @@ linux-nightly:
   script:
     - cargo build --release --verbose
     - strip target/release/parity
-    - mkdir -p x86_64-unknown-linux-gnu/nightly
-    - cp target/release/parity x86_64-unknown-linux-gnu/nigthly/parity
   tags:
     - rust
     - rust-nightly
   artifacts:
     paths:
-    - x86_64-unknown-linux-gnu/nigthly/parity
+    - target/release/parity
     name: "nigthly-x86_64-unknown-linux-gnu_parity"
   allow_failure: true
 linux-centos:
@@ -86,25 +101,25 @@ linux-centos:
     - export CC="gcc"
     - cargo build --release --verbose
     - strip target/release/parity
-    - mkdir -p x86_64-unknown-linux-gnu/centos
-    - cp target/release/parity x86_64-unknown-linux-gnu/centos/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity
   tags:
     - rust
     - rust-centos
   artifacts:
     paths:
-    - x86_64-unknown-linux-gnu/centos/parity
-    name: "centos-x86_64-unknown-linux-gnu_parity"
+    - target/release/parity
+    name: "x86_64-unknown-centos-gnu_parity"
 linux-armv7:
   stage: build
-  image: ethcore/rust-arm:latest
+  image: ethcore/rust-armv7:latest
   only:
     - master
     - beta
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
@@ -112,14 +127,15 @@ linux-armv7:
     - cat .cargo/config
     - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
    - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
-    - mkdir -p armv7_unknown_linux_gnueabihf
-    - cp target/release/party armv7_unknown_linux_gnueabihf/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
-    - armv7-unknown-linux-gnueabihf/parity
+    - target/armv7-unknown-linux-gnueabihf/release/parity
     name: "armv7_unknown_linux_gnueabihf_parity"
   allow_failure: true
 linux-arm:
@@ -131,7 +147,6 @@ linux-arm:
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
@@ -139,26 +154,26 @@ linux-arm:
     - cat .cargo/config
     - cargo build --target arm-unknown-linux-gnueabihf --release --verbose
     - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
-    - mkdir -p arm-unknown-linux-gnueabihf
-    - cp target/release/parity arm-unknown-linux-gnueabihf/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
-    - arm-unknown-linux-gnueabihf/parity
+    - target/arm-unknown-linux-gnueabihf/release/parity
     name: "arm-unknown-linux-gnueabihf_parity"
   allow_failure: true
 linux-armv6:
   stage: build
-  image: ethcore/rust-arm:latest
+  image: ethcore/rust-armv6:latest
   only:
     - master
     - beta
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
@@ -166,26 +181,26 @@ linux-armv6:
     - cat .cargo/config
     - cargo build --target arm-unknown-linux-gnueabi --release --verbose
     - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
-    - mkdir -p arm-unknown-linux-gnueabi
-    - cp target/release/parity arm-unknown-linux-gnueabi/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
-    - arm-unknown-linux-gnueabi/parity
+    - target/arm-unknown-linux-gnueabi/release/parity
     name: "arm-unknown-linux-gnueabi_parity"
   allow_failure: true
 linux-aarch64:
   stage: build
-  image: ethcore/rust-arm:latest
+  image: ethcore/rust-aarch64:latest
   only:
     - master
     - beta
     - tags
     - stable
   script:
-    - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
@@ -193,14 +208,15 @@ linux-aarch64:
     - cat .cargo/config
     - cargo build --target aarch64-unknown-linux-gnu --release --verbose
     - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
-    - mkdir -p aarch64-unknown-linux-gnu
-    - cp target/release/parity aarch64-unknown-linux-gnu/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
-    - aarch64-unknown-linux-gnu/parity
+    - target/aarch64-unknown-linux-gnu/release/parity
     name: "aarch64-unknown-linux-gnu_parity"
   allow_failure: true
 darwin:
@@ -212,13 +228,14 @@ darwin:
     - stable
   script:
     - cargo build --release --verbose
-    - mkdir -p x86_64-apple-darwin
-    - cp target/release/parity x86_64-apple-darwin/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity
   tags:
     - osx
   artifacts:
     paths:
-    - x86_64-apple-darwin/parity
+    - target/release/parity
     name: "x86_64-apple-darwin_parity"
 windows:
   stage: build
@@ -233,6 +250,10 @@ windows:
     - set RUST_BACKTRACE=1
     - rustup default stable-x86_64-pc-windows-msvc
     - cargo build --release --verbose
+    - aws configure set aws_access_key_id %s3_key%
+    - aws configure set aws_secret_access_key %s3_secret%
+    - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe
+    - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb
   tags:
     - rust-windows
   artifacts:
@@ -250,12 +271,3 @@ test-linux:
     - rust-test
   dependencies:
     - linux-stable
-deploy-binaries:
-  stage: deploy
-  only:
-    - master
-    - beta
-    - tags
-    - stable
-  script:
-    - scripts/deploy.sh
Cargo.lock (generated, 11 lines changed)
@@ -37,6 +37,8 @@ dependencies = [
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -281,6 +283,7 @@ dependencies = [
  "ethjson 0.1.0",
  "ethkey 0.2.0",
  "ethstore 0.1.0",
+ "evmjit 1.4.0",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -458,6 +461,7 @@ dependencies = [
  "ethcore-io 1.4.0",
  "ethcore-ipc 1.4.0",
  "ethcore-util 1.4.0",
+ "ethcrypto 0.1.0",
  "ethjson 0.1.0",
  "ethkey 0.2.0",
  "ethstore 0.1.0",
@@ -621,6 +625,13 @@ dependencies = [
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
+
+[[package]]
+name = "evmjit"
+version = "1.4.0"
+dependencies = [
+ "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "fdlimit"
 version = "0.1.0"
@@ -44,6 +44,8 @@ json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
 ethcore-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.90", optional = true}
 ethcore-stratum = { path = "stratum" }
+serde = "0.8.0"
+serde_json = "0.8.0"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"
@@ -61,11 +63,13 @@ ui = ["dapps", "ethcore-signer/ui"]
 use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"]
 dapps = ["ethcore-dapps"]
 ipc = ["ethcore/ipc"]
+jit = ["ethcore/jit"]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
 json-tests = ["ethcore/json-tests"]
 stratum = ["ipc"]
 ethkey-cli = ["ethcore/ethkey-cli"]
 ethstore-cli = ["ethcore/ethstore-cli"]
+evm-debug = ["ethcore/evm-debug"]
 
 [[bin]]
 path = "parity/main.rs"
@@ -14,8 +14,8 @@ Be sure to check out [our wiki][wiki-url] for more information.
 [gitter-image]: https://badges.gitter.im/Join%20Chat.svg
 [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
 [license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg
-[license-url]: http://www.gnu.org/licenses/gpl-3.0.en.html
-[doc-url]: http://ethcore.github.io/parity/ethcore/index.html
+[license-url]: https://www.gnu.org/licenses/gpl-3.0.en.html
+[doc-url]: https://ethcore.github.io/parity/ethcore/index.html
 [wiki-url]: https://github.com/ethcore/parity/wiki
 
 ----
@@ -15,23 +15,26 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::sync::Arc;
-use hyper::{server, net, Decoder, Encoder, Next};
+use hyper::{server, net, Decoder, Encoder, Next, Control};
 use api::types::{App, ApiError};
 use api::response::{as_json, as_json_error, ping_response};
 use handlers::extract_url;
 use endpoint::{Endpoint, Endpoints, Handler, EndpointPath};
+use apps::fetcher::ContentFetcher;
 
 #[derive(Clone)]
 pub struct RestApi {
 	local_domain: String,
 	endpoints: Arc<Endpoints>,
+	fetcher: Arc<ContentFetcher>,
 }
 
 impl RestApi {
-	pub fn new(local_domain: String, endpoints: Arc<Endpoints>) -> Box<Endpoint> {
+	pub fn new(local_domain: String, endpoints: Arc<Endpoints>, fetcher: Arc<ContentFetcher>) -> Box<Endpoint> {
 		Box::new(RestApi {
 			local_domain: local_domain,
 			endpoints: endpoints,
+			fetcher: fetcher,
 		})
 	}
 
@@ -43,23 +46,42 @@ impl RestApi {
 	}
 }
 
 impl Endpoint for RestApi {
-	fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
-		Box::new(RestApiRouter {
-			api: self.clone(),
-			handler: as_json_error(&ApiError {
-				code: "404".into(),
-				title: "Not Found".into(),
-				detail: "Resource you requested has not been found.".into(),
-			}),
-		})
+	fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box<Handler> {
+		Box::new(RestApiRouter::new(self.clone(), path, control))
 	}
 }
 
 struct RestApiRouter {
 	api: RestApi,
+	path: Option<EndpointPath>,
+	control: Option<Control>,
 	handler: Box<Handler>,
 }
 
+impl RestApiRouter {
+	fn new(api: RestApi, path: EndpointPath, control: Control) -> Self {
+		RestApiRouter {
+			path: Some(path),
+			control: Some(control),
+			api: api,
+			handler: as_json_error(&ApiError {
+				code: "404".into(),
+				title: "Not Found".into(),
+				detail: "Resource you requested has not been found.".into(),
+			}),
+		}
+	}
+
+	fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option<Box<Handler>> {
+		match hash {
+			Some(hash) if self.api.fetcher.contains(hash) => {
+				Some(self.api.fetcher.to_async_handler(path, control))
+			},
+			_ => None
+		}
+	}
+}
+
 impl server::Handler<net::HttpStream> for RestApiRouter {
 
 	fn on_request(&mut self, request: server::Request<net::HttpStream>) -> Next {
@@ -69,13 +91,18 @@ impl server::Handler<net::HttpStream> for RestApiRouter {
 			return Next::write();
 		}
 
-		let url = url.expect("Check for None is above; qed");
+		let url = url.expect("Check for None early-exists above; qed");
+		let path = self.path.take().expect("on_request called only once, and path is always defined in new; qed");
+		let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed");
+
 		let endpoint = url.path.get(1).map(|v| v.as_str());
+		let hash = url.path.get(2).map(|v| v.as_str());
 
 		let handler = endpoint.and_then(|v| match v {
 			"apps" => Some(as_json(&self.api.list_apps())),
 			"ping" => Some(ping_response(&self.api.local_domain)),
-			_ => None,
+			"content" => self.resolve_content(hash, path, control),
+			_ => None
 		});
 
 		// Overwrite default
@@ -42,7 +42,9 @@ pub type Handler = server::Handler<net::HttpStream> + Send;
 pub trait Endpoint : Send + Sync {
 	fn info(&self) -> Option<&EndpointInfo> { None }
 
-	fn to_handler(&self, path: EndpointPath) -> Box<Handler>;
+	fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
+		panic!("This Endpoint is asynchronous and requires Control object.");
+	}
 
 	fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box<Handler> {
 		self.to_handler(path)
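The hunk above turns `to_handler` into a panicking default and makes `to_async_handler` the real entry point for endpoints that need the hyper event-loop `Control`. A minimal, self-contained sketch of that default-method pattern, using toy stand-in types rather than the dapps server's own `Handler`, `EndpointPath` and hyper `Control`:

// Toy stand-ins; the real code uses hyper::Control and the dapps Handler trait.
struct Control;
struct EndpointPath;

trait Handler {
	fn describe(&self) -> String;
}

trait Endpoint {
	// Synchronous endpoints override only this method.
	fn to_handler(&self, _path: EndpointPath) -> Box<dyn Handler> {
		panic!("This Endpoint is asynchronous and requires a Control object.");
	}

	// Asynchronous endpoints override this one; the default delegates to the
	// synchronous handler, so existing implementations keep working unchanged.
	fn to_async_handler(&self, path: EndpointPath, _control: Control) -> Box<dyn Handler> {
		self.to_handler(path)
	}
}

struct Ping;
impl Handler for Ping {
	fn describe(&self) -> String { "pong".into() }
}

struct PingEndpoint;
impl Endpoint for PingEndpoint {
	fn to_handler(&self, _path: EndpointPath) -> Box<dyn Handler> {
		Box::new(Ping)
	}
}

fn main() {
	// Callers always go through the async entry point; a synchronous endpoint
	// still works because of the default method.
	let handler = PingEndpoint.to_async_handler(EndpointPath, Control);
	println!("{}", handler.describe());
}

Synchronous endpoints keep overriding `to_handler`; endpoints that need the `Control` (such as the RPC and content-fetching endpoints touched later in this commit) override `to_async_handler` instead.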
@@ -196,8 +196,11 @@ impl Server {
 		let special = Arc::new({
 			let mut special = HashMap::new();
 			special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, panic_handler.clone()));
-			special.insert(router::SpecialEndpoint::Api, api::RestApi::new(format!("{}", addr), endpoints.clone()));
 			special.insert(router::SpecialEndpoint::Utils, apps::utils());
+			special.insert(
+				router::SpecialEndpoint::Api,
+				api::RestApi::new(format!("{}", addr), endpoints.clone(), content_fetcher.clone())
+			);
 			special
 		});
 		let hosts = Self::allowed_hosts(hosts, format!("{}", addr));
@@ -91,7 +91,7 @@ impl<A: Authorization + 'static> server::Handler<HttpStream> for Router<A> {
 			(Some(ref path), _) if self.fetch.contains(&path.app_id) => {
 				self.fetch.to_async_handler(path.clone(), control)
 			},
-			// Redirection to main page (maybe 404 instead?)
+			// 404 for non-existent content
 			(Some(ref path), _) if *req.method() == hyper::method::Method::Get => {
 				let address = apps::redirection_address(path.using_dapps_domains, self.main_page);
 				Box::new(ContentHandler::error(
@@ -143,7 +143,7 @@ impl<A: Authorization> Router<A> {
 		allowed_hosts: Option<Vec<String>>,
 	) -> Self {
 
-		let handler = special.get(&SpecialEndpoint::Api).unwrap().to_handler(EndpointPath::default());
+		let handler = special.get(&SpecialEndpoint::Utils).unwrap().to_handler(EndpointPath::default());
 		Router {
 			control: Some(control),
 			main_page: main_page,
@@ -38,10 +38,6 @@ struct RpcEndpoint {
 }
 
 impl Endpoint for RpcEndpoint {
-	fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
-		panic!("RPC Endpoint is asynchronous and requires Control object.");
-	}
-
 	fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box<Handler> {
 		let panic_handler = PanicHandler { handler: self.panic_handler.clone() };
 		Box::new(ServerHandler::new(
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use tests::helpers::{serve, request};
+use tests::helpers::{serve, serve_with_registrar, request};
 
 #[test]
 fn should_return_error() {
@@ -82,3 +82,24 @@ fn should_handle_ping() {
 	assert_eq!(response.body, "0\n\n".to_owned());
 }
 
+
+#[test]
+fn should_try_to_resolve_dapp() {
+	// given
+	let (server, registrar) = serve_with_registrar();
+
+	// when
+	let response = request(server,
+		"\
+			GET /api/content/1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d HTTP/1.1\r\n\
+			Host: home.parity\r\n\
+			Connection: close\r\n\
+			\r\n\
+		"
+	);
+
+	// then
+	assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned());
+	assert_eq!(registrar.calls.lock().len(), 2);
+}
+
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+use std::time::Duration;
 use std::io::{Read, Write};
 use std::str::{self, Lines};
 use std::net::{TcpStream, SocketAddr};
@@ -43,10 +44,11 @@ pub fn read_block(lines: &mut Lines, all: bool) -> String {
 
 pub fn request(address: &SocketAddr, request: &str) -> Response {
 	let mut req = TcpStream::connect(address).unwrap();
+	req.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
 	req.write_all(request.as_bytes()).unwrap();
 
 	let mut response = String::new();
-	req.read_to_string(&mut response).unwrap();
+	let _ = req.read_to_string(&mut response);
 
 	let mut lines = response.lines();
 	let status = lines.next().unwrap().to_owned();
@@ -23,7 +23,8 @@ use std::ops::{Deref, DerefMut};
 use rand::random;
 
 pub struct RandomTempPath {
-	path: PathBuf
+	path: PathBuf,
+	pub panic_on_drop_failure: bool,
 }
 
 pub fn random_filename() -> String {
@@ -39,7 +40,8 @@ impl RandomTempPath {
 		let mut dir = env::temp_dir();
 		dir.push(random_filename());
 		RandomTempPath {
-			path: dir.clone()
+			path: dir.clone(),
+			panic_on_drop_failure: true,
 		}
 	}
 
@@ -48,7 +50,8 @@ impl RandomTempPath {
 		dir.push(random_filename());
 		fs::create_dir_all(dir.as_path()).unwrap();
 		RandomTempPath {
-			path: dir.clone()
+			path: dir.clone(),
+			panic_on_drop_failure: true,
 		}
 	}
 
@@ -72,12 +75,20 @@ impl AsRef<Path> for RandomTempPath {
 		self.as_path()
 	}
 }
+impl Deref for RandomTempPath {
+	type Target = Path;
+	fn deref(&self) -> &Self::Target {
+		self.as_path()
+	}
+}
 
 impl Drop for RandomTempPath {
 	fn drop(&mut self) {
 		if let Err(_) = fs::remove_dir_all(&self) {
 			if let Err(e) = fs::remove_file(&self) {
-				panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e);
+				if self.panic_on_drop_failure {
+					panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e);
+				}
 			}
 		}
 	}
@@ -322,6 +322,26 @@ impl AccountProvider {
 		Ok(signature)
 	}
 
+	/// Decrypts a message. Account must be unlocked.
+	pub fn decrypt(&self, account: Address, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
+		let data = {
+			let mut unlocked = self.unlocked.lock();
+			let data = try!(unlocked.get(&account).ok_or(Error::NotUnlocked)).clone();
+			if let Unlock::Temp = data.unlock {
+				unlocked.remove(&account).expect("data exists: so key must exist: qed");
+			}
+			if let Unlock::Timed((ref start, ref duration)) = data.unlock {
+				if start.elapsed() > Duration::from_millis(*duration as u64) {
+					unlocked.remove(&account).expect("data exists: so key must exist: qed");
+					return Err(Error::NotUnlocked);
+				}
+			}
+			data
+		};
+
+		Ok(try!(self.sstore.decrypt(&account, &data.password, shared_mac, message)))
+	}
+
 	/// Unlocks an account, signs the message, and locks it again.
 	pub fn sign_with_password(&self, account: Address, password: String, message: Message) -> Result<Signature, Error> {
 		let signature = try!(self.sstore.sign(&account, &password, &message));
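The new `decrypt` path repeats the unlock bookkeeping that `sign` already does: a one-shot (`Temp`) unlock is consumed after use, and a timed unlock is rejected once its window has elapsed. A rough stand-alone sketch of that expiry rule (toy `Unlock` type and names, not the real AccountProvider):

use std::thread::sleep;
use std::time::{Duration, Instant};

// Toy version of the unlock states checked by decrypt()/sign().
enum Unlock {
	OneTime,             // consumed on first use
	Timed(Instant, u64), // valid for the given number of milliseconds
	Perm,                // stays unlocked
}

fn still_unlocked(unlock: &Unlock) -> bool {
	match *unlock {
		Unlock::OneTime => true,
		Unlock::Timed(start, millis) => start.elapsed() <= Duration::from_millis(millis),
		Unlock::Perm => true,
	}
}

fn main() {
	let timed = Unlock::Timed(Instant::now(), 50);
	assert!(still_unlocked(&timed));
	sleep(Duration::from_millis(60));
	assert!(!still_unlocked(&timed)); // expired: the account must be re-unlocked
	assert!(still_unlocked(&Unlock::OneTime));
	assert!(still_unlocked(&Unlock::Perm));
}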
@@ -205,7 +205,6 @@ pub struct ClosedBlock {
 	block: ExecutedBlock,
 	uncle_bytes: Bytes,
 	last_hashes: Arc<LastHashes>,
-	unclosed_state: State,
 }
 
 /// Just like `ClosedBlock` except that we can't reopen it and it's faster.
@@ -343,18 +342,19 @@ impl<'x> OpenBlock<'x> {
 		}
 	}
 
-	/// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles.
+	/// Turn this into a `ClosedBlock`.
 	pub fn close(self) -> ClosedBlock {
 		let mut s = self;
 
-		let unclosed_state = s.block.state.clone();
+		// take a snapshot so the engine's changes can be rolled back.
+		s.block.state.snapshot();
 
 		s.engine.on_close_block(&mut s.block);
-		s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect()));
+		s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec())));
 		let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
 		s.block.base.header.set_uncles_hash(uncle_bytes.sha3());
 		s.block.base.header.set_state_root(s.block.state.root().clone());
-		s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect()));
+		s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec())));
 		s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
 		s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
 
@@ -362,33 +362,37 @@ impl<'x> OpenBlock<'x> {
 			block: s.block,
 			uncle_bytes: uncle_bytes,
 			last_hashes: s.last_hashes,
-			unclosed_state: unclosed_state,
 		}
 	}
 
-	/// Turn this into a `LockedBlock`. A BlockChain must be provided in order to figure out the uncles.
+	/// Turn this into a `LockedBlock`.
 	pub fn close_and_lock(self) -> LockedBlock {
 		let mut s = self;
 
+		// take a snapshot so the engine's changes can be rolled back.
+		s.block.state.snapshot();
+
 		s.engine.on_close_block(&mut s.block);
 		if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP {
-			s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect()));
+			s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec())));
 		}
 		let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
 		if s.block.base.header.uncles_hash().is_zero() {
 			s.block.base.header.set_uncles_hash(uncle_bytes.sha3());
 		}
 		if s.block.base.header.receipts_root().is_zero() || s.block.base.header.receipts_root() == &SHA3_NULL_RLP {
-			s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect()));
+			s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec())));
 		}
 
 		s.block.base.header.set_state_root(s.block.state.root().clone());
 		s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
 		s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
 
-		LockedBlock {
+		ClosedBlock {
 			block: s.block,
 			uncle_bytes: uncle_bytes,
-		}
+			last_hashes: s.last_hashes,
+		}.lock()
 	}
 }
 
@@ -409,7 +413,17 @@ impl ClosedBlock {
 	pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) }
 
 	/// Turn this into a `LockedBlock`, unable to be reopened again.
-	pub fn lock(self) -> LockedBlock {
+	pub fn lock(mut self) -> LockedBlock {
+		// finalize the changes made by the engine.
+		self.block.state.clear_snapshot();
+		if let Err(e) = self.block.state.commit() {
+			warn!("Error committing closed block's state: {:?}", e);
+		}
+
+		// set the state root here, after commit recalculates with the block
+		// rewards.
+		self.block.base.header.set_state_root(self.block.state.root().clone());
+
 		LockedBlock {
 			block: self.block,
 			uncle_bytes: self.uncle_bytes,
@@ -417,12 +431,12 @@ impl ClosedBlock {
 	}
 
 	/// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`.
-	pub fn reopen(self, engine: &Engine) -> OpenBlock {
+	pub fn reopen(mut self, engine: &Engine) -> OpenBlock {
 		// revert rewards (i.e. set state back at last transaction's state).
-		let mut block = self.block;
-		block.state = self.unclosed_state;
+		self.block.state.revert_snapshot();
 		OpenBlock {
-			block: block,
+			block: self.block,
 			engine: engine,
 			last_hashes: self.last_hashes,
 		}
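These hunks replace the cloned `unclosed_state` with a state snapshot: `close()` snapshots before the engine applies block rewards, `lock()` clears the snapshot and commits, and `reopen()` rolls the state back instead of swapping in a saved copy. A self-contained toy sketch of that lifecycle (assumed, simplified types, not ethcore's `State`):

// Toy state with a single snapshot level, mirroring the pattern the hunks
// above introduce.
struct State {
	balances: Vec<u64>,
	snap: Option<Vec<u64>>,
}

impl State {
	fn new() -> Self { State { balances: vec![10, 20], snap: None } }
	fn snapshot(&mut self) { self.snap = Some(self.balances.clone()); }
	fn clear_snapshot(&mut self) { self.snap = None; }
	fn revert_snapshot(&mut self) {
		if let Some(s) = self.snap.take() { self.balances = s; }
	}
	fn add_reward(&mut self, who: usize, amount: u64) { self.balances[who] += amount; }
}

struct OpenBlock { state: State }
struct ClosedBlock { state: State }
struct LockedBlock { state: State }

impl OpenBlock {
	fn close(mut self) -> ClosedBlock {
		// Take a snapshot so the engine's changes can be rolled back.
		self.state.snapshot();
		self.state.add_reward(0, 5); // stands in for engine.on_close_block(..)
		ClosedBlock { state: self.state }
	}
}

impl ClosedBlock {
	fn lock(mut self) -> LockedBlock {
		// Finalize the engine's changes instead of keeping the snapshot around.
		self.state.clear_snapshot();
		LockedBlock { state: self.state }
	}

	fn reopen(mut self) -> OpenBlock {
		// Revert rewards: roll the state back to the pre-close snapshot.
		self.state.revert_snapshot();
		OpenBlock { state: self.state }
	}
}

fn main() {
	let reopened = OpenBlock { state: State::new() }.close().reopen();
	assert_eq!(reopened.state.balances, vec![10, 20]); // reward rolled back

	let locked = reopened.close().lock();
	assert_eq!(locked.state.balances, vec![15, 20]); // reward kept
}

Keeping a single snapshot layer is presumably cheaper than cloning the whole state for every closed block, which is what the removed `unclosed_state` field required.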
@ -17,14 +17,15 @@
|
|||||||
use crypto::sha2::Sha256 as Sha256Digest;
|
use crypto::sha2::Sha256 as Sha256Digest;
|
||||||
use crypto::ripemd160::Ripemd160 as Ripemd160Digest;
|
use crypto::ripemd160::Ripemd160 as Ripemd160Digest;
|
||||||
use crypto::digest::Digest;
|
use crypto::digest::Digest;
|
||||||
use util::*;
|
use std::cmp::min;
|
||||||
|
use util::{U256, H256, Hashable, FixedHash, BytesRef};
|
||||||
use ethkey::{Signature, recover as ec_recover};
|
use ethkey::{Signature, recover as ec_recover};
|
||||||
use ethjson;
|
use ethjson;
|
||||||
|
|
||||||
/// Native implementation of a built-in contract.
|
/// Native implementation of a built-in contract.
|
||||||
pub trait Impl: Send + Sync {
|
pub trait Impl: Send + Sync {
|
||||||
/// execute this built-in on the given input, writing to the given output.
|
/// execute this built-in on the given input, writing to the given output.
|
||||||
fn execute(&self, input: &[u8], out: &mut [u8]);
|
fn execute(&self, input: &[u8], output: &mut BytesRef);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A gas pricing scheme for built-in contracts.
|
/// A gas pricing scheme for built-in contracts.
|
||||||
@ -56,7 +57,7 @@ impl Builtin {
|
|||||||
pub fn cost(&self, s: usize) -> U256 { self.pricer.cost(s) }
|
pub fn cost(&self, s: usize) -> U256 { self.pricer.cost(s) }
|
||||||
|
|
||||||
/// Simple forwarder for execute.
|
/// Simple forwarder for execute.
|
||||||
pub fn execute(&self, input: &[u8], output: &mut[u8]) { self.native.execute(input, output) }
|
pub fn execute(&self, input: &[u8], output: &mut BytesRef) { self.native.execute(input, output) }
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ethjson::spec::Builtin> for Builtin {
|
impl From<ethjson::spec::Builtin> for Builtin {
|
||||||
@ -108,14 +109,13 @@ struct Sha256;
|
|||||||
struct Ripemd160;
|
struct Ripemd160;
|
||||||
|
|
||||||
impl Impl for Identity {
|
impl Impl for Identity {
|
||||||
fn execute(&self, input: &[u8], output: &mut [u8]) {
|
fn execute(&self, input: &[u8], output: &mut BytesRef) {
|
||||||
let len = min(input.len(), output.len());
|
output.write(0, input);
|
||||||
output[..len].copy_from_slice(&input[..len]);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Impl for EcRecover {
|
impl Impl for EcRecover {
|
||||||
fn execute(&self, i: &[u8], output: &mut [u8]) {
|
fn execute(&self, i: &[u8], output: &mut BytesRef) {
|
||||||
let len = min(i.len(), 128);
|
let len = min(i.len(), 128);
|
||||||
|
|
||||||
let mut input = [0; 128];
|
let mut input = [0; 128];
|
||||||
@ -135,58 +135,34 @@ impl Impl for EcRecover {
|
|||||||
if s.is_valid() {
|
if s.is_valid() {
|
||||||
if let Ok(p) = ec_recover(&s, &hash) {
|
if let Ok(p) = ec_recover(&s, &hash) {
|
||||||
let r = p.sha3();
|
let r = p.sha3();
|
||||||
|
output.write(0, &[0; 12]);
|
||||||
let out_len = min(output.len(), 32);
|
output.write(12, &r[12..r.len()]);
|
||||||
|
|
||||||
for x in &mut output[0.. min(12, out_len)] {
|
|
||||||
*x = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if out_len > 12 {
|
|
||||||
output[12..out_len].copy_from_slice(&r[12..out_len]);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Impl for Sha256 {
|
impl Impl for Sha256 {
|
||||||
fn execute(&self, input: &[u8], output: &mut [u8]) {
|
fn execute(&self, input: &[u8], output: &mut BytesRef) {
|
||||||
let out_len = min(output.len(), 32);
|
|
||||||
|
|
||||||
let mut sha = Sha256Digest::new();
|
let mut sha = Sha256Digest::new();
|
||||||
sha.input(input);
|
sha.input(input);
|
||||||
|
|
||||||
if out_len == 32 {
|
let mut out = [0; 32];
|
||||||
sha.result(&mut output[0..32]);
|
sha.result(&mut out);
|
||||||
} else {
|
|
||||||
let mut out = [0; 32];
|
|
||||||
sha.result(&mut out);
|
|
||||||
|
|
||||||
output.copy_from_slice(&out[..out_len])
|
output.write(0, &out);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Impl for Ripemd160 {
|
impl Impl for Ripemd160 {
|
||||||
fn execute(&self, input: &[u8], output: &mut [u8]) {
|
fn execute(&self, input: &[u8], output: &mut BytesRef) {
|
||||||
let out_len = min(output.len(), 32);
|
|
||||||
|
|
||||||
let mut sha = Ripemd160Digest::new();
|
let mut sha = Ripemd160Digest::new();
|
||||||
sha.input(input);
|
sha.input(input);
|
||||||
|
|
||||||
for x in &mut output[0.. min(12, out_len)] {
|
let mut out = [0; 32];
|
||||||
*x = 0;
|
sha.result(&mut out[12..32]);
|
||||||
}
|
|
||||||
|
|
||||||
if out_len >= 32 {
|
output.write(0, &out);
|
||||||
sha.result(&mut output[12..32]);
|
|
||||||
} else if out_len > 12 {
|
|
||||||
let mut out = [0; 20];
|
|
||||||
sha.result(&mut out);
|
|
||||||
|
|
||||||
output.copy_from_slice(&out[12..out_len])
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -194,7 +170,7 @@ impl Impl for Ripemd160 {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use super::{Builtin, Linear, ethereum_builtin, Pricer};
|
use super::{Builtin, Linear, ethereum_builtin, Pricer};
|
||||||
use ethjson;
|
use ethjson;
|
||||||
use util::U256;
|
use util::{U256, BytesRef};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn identity() {
|
fn identity() {
|
||||||
@ -203,15 +179,15 @@ mod tests {
|
|||||||
let i = [0u8, 1, 2, 3];
|
let i = [0u8, 1, 2, 3];
|
||||||
|
|
||||||
let mut o2 = [255u8; 2];
|
let mut o2 = [255u8; 2];
|
||||||
f.execute(&i[..], &mut o2[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o2[..]));
|
||||||
assert_eq!(i[0..2], o2);
|
assert_eq!(i[0..2], o2);
|
||||||
|
|
||||||
let mut o4 = [255u8; 4];
|
let mut o4 = [255u8; 4];
|
||||||
f.execute(&i[..], &mut o4[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o4[..]));
|
||||||
assert_eq!(i, o4);
|
assert_eq!(i, o4);
|
||||||
|
|
||||||
let mut o8 = [255u8; 8];
|
let mut o8 = [255u8; 8];
|
||||||
f.execute(&i[..], &mut o8[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
|
||||||
assert_eq!(i, o8[..4]);
|
assert_eq!(i, o8[..4]);
|
||||||
assert_eq!([255u8; 4], o8[4..]);
|
assert_eq!([255u8; 4], o8[4..]);
|
||||||
}
|
}
|
||||||
@ -224,16 +200,20 @@ mod tests {
|
|||||||
let i = [0u8; 0];
|
let i = [0u8; 0];
|
||||||
|
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i[..], &mut o[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]);
|
||||||
|
|
||||||
let mut o8 = [255u8; 8];
|
let mut o8 = [255u8; 8];
|
||||||
f.execute(&i[..], &mut o8[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
|
||||||
assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]);
|
assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]);
|
||||||
|
|
||||||
let mut o34 = [255u8; 34];
|
let mut o34 = [255u8; 34];
|
||||||
f.execute(&i[..], &mut o34[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..]));
|
||||||
assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]);
|
assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]);
|
||||||
|
|
||||||
|
let mut ov = vec![];
|
||||||
|
f.execute(&i[..], &mut BytesRef::Flexible(&mut ov));
|
||||||
|
assert_eq!(&ov[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -244,15 +224,15 @@ mod tests {
|
|||||||
let i = [0u8; 0];
|
let i = [0u8; 0];
|
||||||
|
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i[..], &mut o[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]);
|
||||||
|
|
||||||
let mut o8 = [255u8; 8];
|
let mut o8 = [255u8; 8];
|
||||||
f.execute(&i[..], &mut o8[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
|
||||||
assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]);
|
assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]);
|
||||||
|
|
||||||
let mut o34 = [255u8; 34];
|
let mut o34 = [255u8; 34];
|
||||||
f.execute(&i[..], &mut o34[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..]));
|
||||||
assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]);
|
assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -272,46 +252,46 @@ mod tests {
|
|||||||
let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
|
let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
|
||||||
|
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i[..], &mut o[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]);
|
||||||
|
|
||||||
let mut o8 = [255u8; 8];
|
let mut o8 = [255u8; 8];
|
||||||
f.execute(&i[..], &mut o8[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..]));
|
||||||
assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]);
|
assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]);
|
||||||
|
|
||||||
let mut o34 = [255u8; 34];
|
let mut o34 = [255u8; 34];
|
||||||
f.execute(&i[..], &mut o34[..]);
|
f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..]));
|
||||||
assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]);
|
assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]);
|
||||||
|
|
||||||
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
|
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i_bad[..], &mut o[..]);
|
f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
||||||
|
|
||||||
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap();
|
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap();
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i_bad[..], &mut o[..]);
|
f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
||||||
|
|
||||||
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap();
|
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap();
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i_bad[..], &mut o[..]);
|
f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
||||||
|
|
||||||
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap();
|
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap();
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i_bad[..], &mut o[..]);
|
f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
||||||
|
|
||||||
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap();
|
let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap();
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i_bad[..], &mut o[..]);
|
f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);
|
||||||
|
|
||||||
// TODO: Should this (corrupted version of the above) fail rather than returning some address?
|
// TODO: Should this (corrupted version of the above) fail rather than returning some address?
|
||||||
/* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
|
/* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
|
||||||
let mut o = [255u8; 32];
|
let mut o = [255u8; 32];
|
||||||
f.execute(&i_bad[..], &mut o[..]);
|
f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/
|
assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -336,7 +316,7 @@ mod tests {
|
|||||||
|
|
||||||
let i = [0u8, 1, 2, 3];
|
let i = [0u8, 1, 2, 3];
|
||||||
let mut o = [255u8; 4];
|
let mut o = [255u8; 4];
|
||||||
b.execute(&i[..], &mut o[..]);
|
b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(i, o);
|
assert_eq!(i, o);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -357,7 +337,7 @@ mod tests {
|
|||||||
|
|
||||||
let i = [0u8, 1, 2, 3];
|
let i = [0u8, 1, 2, 3];
|
||||||
let mut o = [255u8; 4];
|
let mut o = [255u8; 4];
|
||||||
b.execute(&i[..], &mut o[..]);
|
b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..]));
|
||||||
assert_eq!(i, o);
|
assert_eq!(i, o);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
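For context, the builtin tests above now hand the precompile a `BytesRef` output buffer instead of a bare slice. A minimal, self-contained sketch of that idea in Rust follows; the `Fixed` and `Flexible` variants mirror the names used in the diff, but the `write` method here is only an assumed illustration, not Parity's actual API.

// Stand-in for the fixed/flexible output buffer used by the builtin tests.
enum BytesRef<'a> {
    Fixed(&'a mut [u8]),
    Flexible(&'a mut Vec<u8>),
}

impl<'a> BytesRef<'a> {
    // Hypothetical helper: copy `data` into the buffer.
    fn write(&mut self, data: &[u8]) {
        match *self {
            // A fixed buffer never grows: copy at most as many bytes as it can hold.
            BytesRef::Fixed(ref mut slice) => {
                let n = data.len().min(slice.len());
                slice[..n].copy_from_slice(&data[..n]);
            }
            // A flexible buffer is grown to fit the whole output.
            BytesRef::Flexible(ref mut vec) => {
                vec.clear();
                vec.extend_from_slice(data);
            }
        }
    }
}

fn main() {
    let mut fixed = [255u8; 4];
    BytesRef::Fixed(&mut fixed[..]).write(&[0, 1, 2, 3, 4, 5]);
    assert_eq!(fixed, [0, 1, 2, 3]); // truncated to the fixed size

    let mut flexible = Vec::new();
    BytesRef::Flexible(&mut flexible).write(&[0, 1, 2, 3, 4, 5]);
    assert_eq!(flexible.len(), 6);
}

The distinction matters because some callers (like these tests) own a fixed 32-byte output, while others (like `Executive::transact`, later in this diff) collect output into a growable vector.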
@@ -144,7 +144,9 @@ pub struct Client {
 	factories: Factories,
 }

-const HISTORY: u64 = 1200;
+/// The pruning constant -- how old blocks must be before we
+/// assume finality of a given candidate.
+pub const HISTORY: u64 = 1200;

 /// Append a path element to the given path and return the string.
 pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
@@ -168,7 +170,7 @@ impl Client {

 		let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database)));
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
-		let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())));
+		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));

 		let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
 		if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) {
@@ -685,7 +687,7 @@ impl snapshot::DatabaseRestore for Client {

 		*state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE);
 		*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
-		*tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from));
+		*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
 		Ok(())
 	}
 }
@@ -957,7 +959,7 @@ impl BlockChainClient for Client {
 		}
 	}

-	fn logs(&self, filter: Filter, limit: Option<usize>) -> Vec<LocalizedLogEntry> {
+	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
 		let blocks = filter.bloom_possibilities().iter()
 			.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
 			.flat_map(|m| m)
@@ -966,7 +968,7 @@ impl BlockChainClient for Client {
 			.into_iter()
 			.collect::<Vec<u64>>();

-		self.chain.read().logs(blocks, |entry| filter.matches(entry), limit)
+		self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)
 	}

 	fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
@@ -18,7 +18,7 @@ use std::str::FromStr;
 pub use std::time::Duration;
 pub use block_queue::BlockQueueConfig;
 pub use blockchain::Config as BlockChainConfig;
-pub use trace::{Config as TraceConfig, Switch};
+pub use trace::Config as TraceConfig;
 pub use evm::VMType;
 pub use verification::VerifierType;
 use util::{journaldb, CompactionProfile};
@@ -102,7 +102,7 @@ pub struct ClientConfig {
 	/// State db compaction profile
 	pub db_compaction: DatabaseCompactionProfile,
 	/// Should db have WAL enabled?
 	pub db_wal: bool,
 	/// Operating mode
 	pub mode: Mode,
 	/// Type of block verifier used by client.
@@ -23,7 +23,7 @@ mod trace;
 mod client;

 pub use self::client::*;
-pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
+pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType};
 pub use self::error::Error;
 pub use types::ids::*;
 pub use self::test_client::{TestBlockChainClient, EachBlockWith};
@@ -67,6 +67,8 @@ pub struct TestBlockChainClient {
 	pub execution_result: RwLock<Option<Result<Executed, CallError>>>,
 	/// Transaction receipts.
 	pub receipts: RwLock<HashMap<TransactionID, LocalizedReceipt>>,
+	/// Logs
+	pub logs: RwLock<Vec<LocalizedLogEntry>>,
 	/// Block queue size.
 	pub queue_size: AtomicUsize,
 	/// Miner
@@ -114,6 +116,7 @@ impl TestBlockChainClient {
 			code: RwLock::new(HashMap::new()),
 			execution_result: RwLock::new(None),
 			receipts: RwLock::new(HashMap::new()),
+			logs: RwLock::new(Vec::new()),
 			queue_size: AtomicUsize::new(0),
 			miner: Arc::new(Miner::with_spec(&spec)),
 			spec: spec,
@@ -165,6 +168,11 @@ impl TestBlockChainClient {
 		*self.latest_block_timestamp.write() = ts;
 	}

+	/// Set logs to return for each logs call.
+	pub fn set_logs(&self, logs: Vec<LocalizedLogEntry>) {
+		*self.logs.write() = logs;
+	}
+
 	/// Add blocks to test client.
 	pub fn add_blocks(&self, count: usize, with: EachBlockWith) {
 		let len = self.numbers.read().len();
@@ -390,8 +398,13 @@ impl BlockChainClient for TestBlockChainClient {
 		unimplemented!();
 	}

-	fn logs(&self, _filter: Filter, _limit: Option<usize>) -> Vec<LocalizedLogEntry> {
-		Vec::new()
+	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
+		let mut logs = self.logs.read().clone();
+		let len = logs.len();
+		match filter.limit {
+			Some(limit) if limit <= len => logs.split_off(len - limit),
+			_ => logs,
+		}
 	}

 	fn last_hashes(&self) -> LastHashes {
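The test client above keeps only the newest `limit` entries, and `Vec::split_off` does that in place: everything before the split index stays in the original vector, the tail is returned. A minimal, runnable sketch of the same tail-taking logic, with plain integers standing in for `LocalizedLogEntry`:

fn take_last(mut logs: Vec<u32>, limit: Option<usize>) -> Vec<u32> {
    let len = logs.len();
    match limit {
        // split_off(len - limit) returns the last `limit` elements.
        Some(limit) if limit <= len => logs.split_off(len - limit),
        // No limit, or a limit larger than the collection: return everything.
        _ => logs,
    }
}

fn main() {
    assert_eq!(take_last(vec![1, 2, 3, 4], Some(2)), vec![3, 4]);
    assert_eq!(take_last(vec![1, 2, 3, 4], None), vec![1, 2, 3, 4]);
    assert_eq!(take_last(vec![1, 2], Some(5)), vec![1, 2]);
}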
@@ -156,7 +156,7 @@ pub trait BlockChainClient : Sync + Send {
 	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>>;

 	/// Returns logs matching given filter.
-	fn logs(&self, filter: Filter, limit: Option<usize>) -> Vec<LocalizedLogEntry>;
+	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;

 	/// Makes a non-persistent transaction call.
 	fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result<Executed, CallError>;
@@ -215,8 +215,11 @@ pub trait BlockChainClient : Sync + Send {
 /// Extended client interface used for mining
 pub trait MiningBlockChainClient : BlockChainClient {
 	/// Returns OpenBlock prepared for closing.
-	fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes)
-		-> OpenBlock;
+	fn prepare_open_block(&self,
+		author: Address,
+		gas_range_target: (U256, U256),
+		extra_data: Bytes
+	) -> OpenBlock;

 	/// Returns EvmFactory.
 	fn vm_factory(&self) -> &EvmFactory;
@@ -45,7 +45,7 @@ pub trait Engine : Sync + Send {
 	fn extra_info(&self, _header: &Header) -> HashMap<String, String> { HashMap::new() }

 	/// Additional information.
 	fn additional_params(&self) -> HashMap<String, String> { HashMap::new() }

 	/// Get the general parameters of the chain.
 	fn params(&self) -> &CommonParams;
@@ -126,7 +126,7 @@ pub trait Engine : Sync + Send {
 	fn cost_of_builtin(&self, a: &Address, input: &[u8]) -> U256 { self.builtins().get(a).unwrap().cost(input.len()) }
 	/// Execution the builtin contract `a` on `input` and return `output`.
 	/// Panics if `is_builtin(a)` is not true.
-	fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut [u8]) { self.builtins().get(a).unwrap().execute(input, output); }
+	fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut BytesRef) { self.builtins().get(a).unwrap().execute(input, output); }

 	// TODO: sealing stuff - though might want to leave this for later.
 }
@@ -167,9 +167,7 @@ impl Engine for Ethash {
 		for u in fields.uncles.iter() {
 			fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8)));
 		}
-		if let Err(e) = fields.state.commit() {
-			warn!("Encountered error on state commit: {}", e);
-		}
 	}

 	fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
@@ -113,7 +113,10 @@ impl<'a> Finalize for Result<GasLeft<'a>> {
 }

 /// Cost calculation type. For low-gas usage we calculate costs using usize instead of U256
-pub trait CostType: ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Output=Self> + ops::Sub<Output=Self> + ops::Shr<usize, Output=Self> + ops::Shl<usize, Output=Self> + cmp::Ord + Sized + From<usize> + Copy {
+pub trait CostType: Sized + From<usize> + Copy
+	+ ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Output=Self> +ops::Sub<Output=Self>
+	+ ops::Shr<usize, Output=Self> + ops::Shl<usize, Output=Self>
+	+ cmp::Ord + fmt::Debug {
 	/// Converts this cost into `U256`
 	fn as_u256(&self) -> U256;
 	/// Tries to fit `U256` into this `Cost` type
@@ -83,6 +83,9 @@ pub trait Ext {
 	/// Returns code at given address
 	fn extcode(&self, address: &Address) -> Bytes;

+	/// Returns code size at given address
+	fn extcodesize(&self, address: &Address) -> usize;
+
 	/// Creates log entry with given topics and data
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]);

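The new `extcodesize` hook exists because EXTCODESIZE previously had to clone the whole code blob just to read its length. A small sketch of the idea with a toy trait: the method names mirror the diff, while the address type, the default-method shortcut, and the example values are assumptions made for illustration only.

use std::collections::HashMap;

trait Ext {
    fn extcode(&self, address: &u64) -> Vec<u8>;
    // Cheap path: report the size without materialising a copy of the code.
    fn extcodesize(&self, address: &u64) -> usize {
        self.extcode(address).len()
    }
}

struct FakeExt {
    codes: HashMap<u64, Vec<u8>>,
}

impl Ext for FakeExt {
    fn extcode(&self, address: &u64) -> Vec<u8> {
        self.codes.get(address).cloned().unwrap_or_default()
    }
    fn extcodesize(&self, address: &u64) -> usize {
        // Overridden to avoid the clone entirely.
        self.codes.get(address).map(|c| c.len()).unwrap_or(0)
    }
}

fn main() {
    let mut codes = HashMap::new();
    codes.insert(1u64, vec![0x60, 0x00, 0x60, 0x00]);
    let ext = FakeExt { codes };
    assert_eq!(ext.extcodesize(&1), 4);
    assert_eq!(ext.extcodesize(&2), 0);
}

The same pattern appears three more times further down in this diff, where the fake, real, and JSON-test externalities all gain an `extcodesize` implementation.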
@@ -53,6 +53,17 @@ fn color(instruction: Instruction, name: &'static str) -> String {
 type CodePosition = usize;
 type ProgramCounter = usize;

+const ONE: U256 = U256([1, 0, 0, 0]);
+const TWO: U256 = U256([2, 0, 0, 0]);
+const TWO_POW_5: U256 = U256([0x20, 0, 0, 0]);
+const TWO_POW_8: U256 = U256([0x100, 0, 0, 0]);
+const TWO_POW_16: U256 = U256([0x10000, 0, 0, 0]);
+const TWO_POW_24: U256 = U256([0x1000000, 0, 0, 0]);
+const TWO_POW_64: U256 = U256([0, 0x1, 0, 0]); // 0x1 00000000 00000000
+const TWO_POW_96: U256 = U256([0, 0x100000000, 0, 0]); //0x1 00000000 00000000 00000000
+const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 000000
+
 /// Abstraction over raw vector of Bytes. Easier state management of PC.
 struct CodeReader<'a> {
 	position: ProgramCounter,
@@ -126,7 +137,7 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 			gasometer.current_gas = gasometer.current_gas - gas_cost;

 			evm_debug!({
-				println!("[0x{:x}][{}(0x{:x}) Gas: {:x}\n Gas Before: {:x}",
+				println!("[0x{:x}][{}(0x{:x}) Gas: {:?}\n Gas Before: {:?}",
 					reader.position,
 					color(instruction, info.name),
 					instruction,
@@ -471,7 +482,7 @@ impl<Cost: CostType> Interpreter<Cost> {
 			},
 			instructions::EXTCODESIZE => {
 				let address = u256_to_address(&stack.pop_back());
-				let len = ext.extcode(&address).len();
+				let len = ext.extcodesize(&address);
 				stack.push(U256::from(len));
 			},
 			instructions::CALLDATACOPY => {
@@ -599,7 +610,19 @@ impl<Cost: CostType> Interpreter<Cost> {
 				let a = stack.pop_back();
 				let b = stack.pop_back();
 				stack.push(if !self.is_zero(&b) {
-					a.overflowing_div(b).0
+					match b {
+						ONE => a,
+						TWO => a >> 1,
+						TWO_POW_5 => a >> 5,
+						TWO_POW_8 => a >> 8,
+						TWO_POW_16 => a >> 16,
+						TWO_POW_24 => a >> 24,
+						TWO_POW_64 => a >> 64,
+						TWO_POW_96 => a >> 96,
+						TWO_POW_224 => a >> 224,
+						TWO_POW_248 => a >> 248,
+						_ => a.overflowing_div(b).0,
+					}
 				} else {
 					U256::zero()
 				});
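The constants above are pre-built limb patterns for common power-of-two divisors (per the inline comments, `U256` here is four little-endian 64-bit words, so `U256([0, 0x1, 0, 0])` is 2^64). Dividing by a power of two is just a right shift, which is far cheaper than full 256-bit long division. The same trick on a native integer type (u128 here, since a 256-bit type is not in std):

// Divide `a` by `b`, using a shift when `b` is a power of two.
fn fast_div(a: u128, b: u128) -> u128 {
    if b == 0 {
        // Mirrors the interpreter's convention: division by zero yields zero.
        return 0;
    }
    if b.is_power_of_two() {
        a >> b.trailing_zeros()
    } else {
        a / b
    }
}

fn main() {
    assert_eq!(fast_div(1000, 32), 1000 / 32); // 2^5: handled by a shift
    assert_eq!(fast_div(1000, 3), 333);        // general case: ordinary division
    assert_eq!(fast_div(1000, 0), 0);
}

The interpreter cannot call `is_power_of_two` on its own type, so it enumerates the divisors it expects to see in practice and falls back to `overflowing_div` for everything else.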
@@ -18,6 +18,7 @@
 use common::*;
 use evmjit;
 use evm::{self, GasLeft};
+use types::executed::CallType;

 /// Should be used to convert jit types to ethcore
 trait FromJit<T>: Sized {
@@ -77,10 +78,11 @@ impl IntoJit<evmjit::I256> for U256 {
 impl IntoJit<evmjit::I256> for H256 {
 	fn into_jit(self) -> evmjit::I256 {
 		let mut ret = [0; 4];
-		for i in 0..self.bytes().len() {
-			let rev = self.bytes().len() - 1 - i;
+		let len = self.len();
+		for i in 0..len {
+			let rev = len - 1 - i;
 			let pos = rev / 8;
-			ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8);
+			ret[pos] += (self[i] as u64) << ((rev % 8) * 8);
 		}
 		evmjit::I256 { words: ret }
 	}
@@ -206,6 +208,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
 		let sender_address = unsafe { Address::from_jit(&*sender_address) };
 		let receive_address = unsafe { Address::from_jit(&*receive_address) };
 		let code_address = unsafe { Address::from_jit(&*code_address) };
+		// TODO Is it always safe in case of DELEGATE_CALL?
 		let transfer_value = unsafe { U256::from_jit(&*transfer_value) };
 		let value = Some(transfer_value);

@@ -239,6 +242,12 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
 			}
 		}

+		// TODO [ToDr] Any way to detect DelegateCall?
+		let call_type = match is_callcode {
+			true => CallType::CallCode,
+			false => CallType::Call,
+		};
+
 		match self.ext.call(
 			&call_gas,
 			&sender_address,
@@ -246,7 +255,9 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
 			value,
 			unsafe { slice::from_raw_parts(in_beg, in_size as usize) },
 			&code_address,
-			unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) {
+			unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) },
+			call_type,
+		) {
 			evm::MessageCallResult::Success(gas_left) => unsafe {
 				*io_gas = (gas + gas_left).low_u64();
 				true
@@ -140,6 +140,10 @@ impl Ext for FakeExt {
 		self.codes.get(address).unwrap_or(&Bytes::new()).clone()
 	}

+	fn extcodesize(&self, address: &Address) -> usize {
+		self.codes.get(address).map(|v| v.len()).unwrap_or(0)
+	}
+
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
 		self.logs.push(FakeLogEntry {
 			topics: topics,
@@ -193,7 +193,6 @@ impl<'a> Executive<'a> {
 			data: Some(t.data.clone()),
 			call_type: CallType::Call,
 		};
-		// TODO: move output upstream
 		let mut out = vec![];
 		(self.call(params, &mut substate, BytesRef::Flexible(&mut out), &mut tracer, &mut vm_tracer), out)
 	}
@@ -205,6 +205,11 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT
 		self.state.code(address).unwrap_or_else(|| vec![])
 	}

+	fn extcodesize(&self, address: &Address) -> usize {
+		self.state.code_size(address).unwrap_or(0)
+	}
+

 	#[cfg_attr(feature="dev", allow(match_ref_pats))]
 	fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result<U256>
 		where Self: Sized {
@@ -131,6 +131,10 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer {
 		self.ext.extcode(address)
 	}

+	fn extcodesize(&self, address: &Address) -> usize {
+		self.ext.extcodesize(address)
+	}
+
 	fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
 		self.ext.log(topics, data)
 	}
@@ -292,6 +292,7 @@ impl Miner {
 		};

 		let mut invalid_transactions = HashSet::new();
+		let mut transactions_to_penalize = HashSet::new();
 		let block_number = open_block.block().fields().header.number();
 		// TODO: push new uncles, too.
 		for tx in transactions {
@@ -299,6 +300,12 @@ impl Miner {
 			match open_block.push_transaction(tx, None) {
 				Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas })) => {
 					debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas);
+
+					// Penalize transaction if it's above current gas limit
+					if gas > gas_limit {
+						transactions_to_penalize.insert(hash);
+					}
+
 					// Exit early if gas left is smaller then min_tx_gas
 					let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly.
 					if gas_limit - gas_used < min_tx_gas {
@@ -334,6 +341,9 @@ impl Miner {
 			for hash in invalid_transactions.into_iter() {
 				queue.remove_invalid(&hash, &fetch_account);
 			}
+			for hash in transactions_to_penalize {
+				queue.penalize(&hash);
+			}
 		}
 		(block, original_work_hash)
 	}
@@ -134,6 +134,8 @@ struct TransactionOrder {
 	hash: H256,
 	/// Origin of the transaction
 	origin: TransactionOrigin,
+	/// Penalties
+	penalties: usize,
 }


@@ -144,6 +146,7 @@ impl TransactionOrder {
 			gas_price: tx.transaction.gas_price,
 			hash: tx.hash(),
 			origin: tx.origin,
+			penalties: 0,
 		}
 	}

@@ -151,6 +154,11 @@ impl TransactionOrder {
 		self.nonce_height = nonce - base_nonce;
 		self
 	}
+
+	fn penalize(mut self) -> Self {
+		self.penalties = self.penalties.saturating_add(1);
+		self
+	}
 }

 impl Eq for TransactionOrder {}
@@ -167,6 +175,11 @@ impl PartialOrd for TransactionOrder {

 impl Ord for TransactionOrder {
 	fn cmp(&self, b: &TransactionOrder) -> Ordering {
+		// First check number of penalties
+		if self.penalties != b.penalties {
+			return self.penalties.cmp(&b.penalties);
+		}
+
 		// First check nonce_height
 		if self.nonce_height != b.nonce_height {
 			return self.nonce_height.cmp(&b.nonce_height);
@@ -387,7 +400,7 @@ pub struct AccountDetails {
 }

 /// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
-const GAS_LIMIT_HYSTERESIS: usize = 10; // %
+const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) %

 /// `TransactionQueue` implementation
 pub struct TransactionQueue {
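The rewritten comment spells the unit out: with `GAS_LIMIT_HYSTERESIS = 10`, the tolerated overshoot is 100/10 = 10%, so the queue accepts transactions whose gas is at most the block gas limit plus one tenth of it. A quick check of that arithmetic (the concrete gas-limit value below is just an illustration):

fn max_acceptable_gas(gas_limit: u64, hysteresis: u64) -> u64 {
    // gas_limit plus (100 / hysteresis) percent of it.
    gas_limit + gas_limit / hysteresis
}

fn main() {
    // With the constant of 10 this is 10% headroom over the block gas limit.
    assert_eq!(max_acceptable_gas(4_700_000, 10), 5_170_000);
}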
@@ -506,8 +519,6 @@ impl TransactionQueue {
 	pub fn add<T>(&mut self, tx: SignedTransaction, fetch_account: &T, origin: TransactionOrigin) -> Result<TransactionImportResult, Error>
 		where T: Fn(&Address) -> AccountDetails {
-
-		trace!(target: "txqueue", "Importing: {:?}", tx.hash());

 		if tx.gas_price < self.minimal_gas_price && origin != TransactionOrigin::Local {
 			trace!(target: "txqueue",
 				"Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})",
@@ -593,6 +604,39 @@ impl TransactionQueue {
 		assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
 	}

+	/// Penalize transactions from sender of transaction with given hash.
+	/// I.e. it should change the priority of the transaction in the queue.
+	///
+	/// NOTE: We need to penalize all transactions from particular sender
+	/// to avoid breaking invariants in queue (ordered by nonces).
+	/// Consecutive transactions from this sender would fail otherwise (because of invalid nonce).
+	pub fn penalize(&mut self, transaction_hash: &H256) {
+		let transaction = match self.by_hash.get(transaction_hash) {
+			None => return,
+			Some(t) => t,
+		};
+		let sender = transaction.sender();
+
+		// Penalize all transactions from this sender
+		let nonces_from_sender = match self.current.by_address.row(&sender) {
+			Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
+			None => vec![],
+		};
+		for k in nonces_from_sender {
+			let order = self.current.drop(&sender, &k).unwrap();
+			self.current.insert(sender, k, order.penalize());
+		}
+		// Same thing for future
+		let nonces_from_sender = match self.future.by_address.row(&sender) {
+			Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
+			None => vec![],
+		};
+		for k in nonces_from_sender {
+			let order = self.future.drop(&sender, &k).unwrap();
+			self.current.insert(sender, k, order.penalize());
+		}
+	}
+
 	/// Removes invalid transaction identified by hash from queue.
 	/// Assumption is that this transaction nonce is not related to client nonce,
 	/// so transactions left in queue are processed according to client nonce.
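Penalized orders sink to the back of the priority queue, but every transaction from the offending sender moves together so nonce ordering inside the queue stays intact. A simplified, self-contained sketch of that penalty-first ordering; the real `TransactionOrder` also compares nonce height and origin, and the "higher gas price wins among equals" rule here is inferred from the test expectations later in this diff rather than shown verbatim:

use std::cmp::Ordering;

#[derive(Eq, PartialEq)]
struct Order {
    penalties: usize,
    gas_price: u64,
}

impl Ord for Order {
    fn cmp(&self, other: &Order) -> Ordering {
        // Fewer penalties first; among equals, higher gas price first.
        self.penalties
            .cmp(&other.penalties)
            .then(other.gas_price.cmp(&self.gas_price))
    }
}

impl PartialOrd for Order {
    fn partial_cmp(&self, other: &Order) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut orders = vec![
        Order { penalties: 1, gas_price: 50 }, // penalized, even though well paid
        Order { penalties: 0, gas_price: 10 },
        Order { penalties: 0, gas_price: 20 },
    ];
    orders.sort();
    let prices: Vec<u64> = orders.iter().map(|o| o.gas_price).collect();
    assert_eq!(prices, vec![20, 10, 50]);
}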
@@ -764,6 +808,7 @@ impl TransactionQueue {

 		let address = tx.sender();
 		let nonce = tx.nonce();
+		let hash = tx.hash();

 		let next_nonce = self.last_nonces
 			.get(&address)
@@ -785,6 +830,9 @@ impl TransactionQueue {
 			try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash)));
 			// Return an error if this transaction is not imported because of limit.
 			try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
+
+			debug!(target: "txqueue", "Importing transaction to future: {:?}", hash);
+			debug!(target: "txqueue", "status: {:?}", self.status());
 			return Ok(TransactionImportResult::Future);
 		}
 		try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
@@ -811,7 +859,8 @@ impl TransactionQueue {
 		// Trigger error if the transaction we are importing was removed.
 		try!(check_if_removed(&address, &nonce, removed));

-		trace!(target: "txqueue", "status: {:?}", self.status());
+		debug!(target: "txqueue", "Imported transaction to current: {:?}", hash);
+		debug!(target: "txqueue", "status: {:?}", self.status());
 		Ok(TransactionImportResult::Current)
 	}

@@ -945,6 +994,17 @@ mod test {
 		(tx1.sign(secret), tx2.sign(secret))
 	}

+	/// Returns two consecutive transactions, both with increased gas price
+	fn new_tx_pair_with_gas_price_increment(gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) {
+		let gas = default_gas_price() + gas_price_increment;
+		let tx1 = new_unsigned_tx(default_nonce(), gas);
+		let tx2 = new_unsigned_tx(default_nonce() + 1.into(), gas);
+
+		let keypair = Random.generate().unwrap();
+		let secret = &keypair.secret();
+		(tx1.sign(secret), tx2.sign(secret))
+	}
+
 	fn new_tx_pair_default(nonce_increment: U256, gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) {
 		new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment)
 	}
@@ -1332,6 +1392,39 @@ mod test {
 		assert_eq!(top.len(), 2);
 	}

+	#[test]
+	fn should_penalize_transactions_from_sender() {
+		// given
+		let mut txq = TransactionQueue::new();
+		// txa, txb - slightly bigger gas price to have consistent ordering
+		let (txa, txb) = new_tx_pair_default(1.into(), 0.into());
+		let (tx1, tx2) = new_tx_pair_with_gas_price_increment(3.into());
+
+		// insert everything
+		txq.add(txa.clone(), &default_account_details, TransactionOrigin::External).unwrap();
+		txq.add(txb.clone(), &default_account_details, TransactionOrigin::External).unwrap();
+		txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap();
+		txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap();
+
+		let top = txq.top_transactions();
+		assert_eq!(top[0], tx1);
+		assert_eq!(top[1], txa);
+		assert_eq!(top[2], tx2);
+		assert_eq!(top[3], txb);
+		assert_eq!(top.len(), 4);
+
+		// when
+		txq.penalize(&tx1.hash());
+
+		// then
+		let top = txq.top_transactions();
+		assert_eq!(top[0], txa);
+		assert_eq!(top[1], txb);
+		assert_eq!(top[2], tx1);
+		assert_eq!(top[3], tx2);
+		assert_eq!(top.len(), 4);
+	}
+
 	#[test]
 	fn should_return_pending_hashes() {
 		// given
@@ -92,7 +92,8 @@ impl Account {

 		let mut pairs = Vec::new();

-		for (k, v) in db.iter() {
+		for item in try!(db.iter()) {
+			let (k, v) = try!(item);
 			pairs.push((k, v));
 		}

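The trie iterator now yields `Result` items, so each element has to be checked instead of assuming iteration cannot fail (the same change recurs in `chunk_state` and the snapshot test helpers below). A self-contained sketch of that fallible-iteration pattern, with a plain vector of `Result`s standing in for the trie iterator:

fn collect_pairs(
    iter: impl IntoIterator<Item = Result<(u32, u32), String>>,
) -> Result<Vec<(u32, u32)>, String> {
    let mut pairs = Vec::new();
    for item in iter {
        // Equivalent of the diff's `let (k, v) = try!(item);`: stop at the first error.
        let (k, v) = item?;
        pairs.push((k, v));
    }
    Ok(pairs)
}

fn main() {
    let ok = vec![Ok((1, 10)), Ok((2, 20))];
    assert_eq!(collect_pairs(ok), Ok(vec![(1, 10), (2, 20)]));

    let bad: Vec<Result<(u32, u32), String>> = vec![Ok((1, 10)), Err("corrupt node".to_string())];
    assert_eq!(collect_pairs(bad), Err("corrupt node".to_string()));
}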
@@ -21,10 +21,10 @@ use header::Header;

 use views::BlockView;
 use rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View};
-use rlp::{Compressible, RlpType};
 use util::{Bytes, Hashable, H256};
+use util::triehash::ordered_trie_root;

-const HEADER_FIELDS: usize = 10;
+const HEADER_FIELDS: usize = 8;
 const BLOCK_FIELDS: usize = 2;

 pub struct AbridgedBlock {
@@ -61,8 +61,6 @@ impl AbridgedBlock {
 		stream
 			.append(&header.author())
 			.append(&header.state_root())
-			.append(&header.transactions_root())
-			.append(&header.receipts_root())
 			.append(&header.log_bloom())
 			.append(&header.difficulty())
 			.append(&header.gas_limit())
@@ -79,33 +77,35 @@ impl AbridgedBlock {
 		}

 		AbridgedBlock {
-			rlp: UntrustedRlp::new(stream.as_raw()).compress(RlpType::Blocks).to_vec(),
+			rlp: stream.out(),
 		}
 	}

 	/// Flesh out an abridged block view with the provided parent hash and block number.
 	///
 	/// Will fail if contains invalid rlp.
-	pub fn to_block(&self, parent_hash: H256, number: u64) -> Result<Block, DecoderError> {
-		let rlp = UntrustedRlp::new(&self.rlp).decompress(RlpType::Blocks);
-		let rlp = UntrustedRlp::new(&rlp);
+	pub fn to_block(&self, parent_hash: H256, number: u64, receipts_root: H256) -> Result<Block, DecoderError> {
+		let rlp = UntrustedRlp::new(&self.rlp);

 		let mut header: Header = Default::default();
 		header.set_parent_hash(parent_hash);
 		header.set_author(try!(rlp.val_at(0)));
 		header.set_state_root(try!(rlp.val_at(1)));
-		header.set_transactions_root(try!(rlp.val_at(2)));
-		header.set_receipts_root(try!(rlp.val_at(3)));
-		header.set_log_bloom(try!(rlp.val_at(4)));
-		header.set_difficulty(try!(rlp.val_at(5)));
+		header.set_log_bloom(try!(rlp.val_at(2)));
+		header.set_difficulty(try!(rlp.val_at(3)));
 		header.set_number(number);
-		header.set_gas_limit(try!(rlp.val_at(6)));
-		header.set_gas_used(try!(rlp.val_at(7)));
-		header.set_timestamp(try!(rlp.val_at(8)));
-		header.set_extra_data(try!(rlp.val_at(9)));
+		header.set_gas_limit(try!(rlp.val_at(4)));
+		header.set_gas_used(try!(rlp.val_at(5)));
+		header.set_timestamp(try!(rlp.val_at(6)));
+		header.set_extra_data(try!(rlp.val_at(7)));

-		let transactions = try!(rlp.val_at(10));
-		let uncles: Vec<Header> = try!(rlp.val_at(11));
+		let transactions = try!(rlp.val_at(8));
+		let uncles: Vec<Header> = try!(rlp.val_at(9));

+		header.set_transactions_root(ordered_trie_root(
+			try!(rlp.at(8)).iter().map(|r| r.as_raw().to_owned())
+		));
+		header.set_receipts_root(receipts_root);
+
 		let mut uncles_rlp = RlpStream::new();
 		uncles_rlp.append(&uncles);
@@ -143,20 +143,22 @@ mod tests {
 	#[test]
 	fn empty_block_abridging() {
 		let b = Block::default();
+		let receipts_root = b.header.receipts_root().clone();
 		let encoded = encode_block(&b);

 		let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded));
-		assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b);
+		assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b);
 	}

 	#[test]
 	#[should_panic]
 	fn wrong_number() {
 		let b = Block::default();
+		let receipts_root = b.header.receipts_root().clone();
 		let encoded = encode_block(&b);

 		let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded));
-		assert_eq!(abridged.to_block(H256::new(), 2).unwrap(), b);
+		assert_eq!(abridged.to_block(H256::new(), 2, receipts_root).unwrap(), b);
 	}

 	#[test]
@@ -184,9 +186,14 @@ mod tests {
 		b.transactions.push(t1);
 		b.transactions.push(t2);

+		let receipts_root = b.header.receipts_root().clone();
+		b.header.set_transactions_root(::util::triehash::ordered_trie_root(
+			b.transactions.iter().map(::rlp::encode).map(|out| out.to_vec())
+		));
+
 		let encoded = encode_block(&b);

 		let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded[..]));
-		assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b);
+		assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b);
 	}
 }
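The abridged block stops carrying the transactions and receipts roots because both can be rebuilt from data the snapshot already contains: the transactions root from the transactions themselves, the receipts root from the receipts stored alongside each block. A toy illustration of that "recompute instead of store" design choice; the `stand_in_root` digest below is just std's `DefaultHasher`, not the real `ordered_trie_root`, and the struct is hypothetical.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for ordered_trie_root: any deterministic digest of the ordered items.
fn stand_in_root(items: &[Vec<u8>]) -> u64 {
    let mut hasher = DefaultHasher::new();
    for item in items {
        item.hash(&mut hasher);
    }
    hasher.finish()
}

struct AbridgedToy {
    // The payload is kept; the derived root is not stored at all.
    transactions: Vec<Vec<u8>>,
}

fn restore_root(block: &AbridgedToy) -> u64 {
    stand_in_root(&block.transactions)
}

fn main() {
    let block = AbridgedToy { transactions: vec![vec![1, 2], vec![3]] };
    let original_root = stand_in_root(&block.transactions);
    // Restoration derives exactly the same root, so storing it would be redundant.
    assert_eq!(restore_root(&block), original_root);
}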
@@ -15,6 +15,9 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 //! Snapshot creation, restoration, and network service.
+//!
+//! Documentation of the format can be found at
+//! https://github.com/ethcore/parity/wiki/%22PV64%22-Snapshot-Format

 use std::collections::{HashMap, HashSet, VecDeque};
 use std::sync::Arc;
@@ -34,7 +37,7 @@ use util::journaldb::{self, Algorithm, JournalDB};
 use util::kvdb::Database;
 use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut};
 use util::sha3::SHA3_NULL_RLP;
-use rlp::{RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType};
+use rlp::{RlpStream, Stream, UntrustedRlp, View};

 use self::account::Account;
 use self::block::AbridgedBlock;
@@ -358,15 +361,15 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex<SnapshotWriter +
 	let mut used_code = HashSet::new();

 	// account_key here is the address' hash.
-	for (account_key, account_data) in account_trie.iter() {
+	for item in try!(account_trie.iter()) {
+		let (account_key, account_data) = try!(item);
 		let account = Account::from_thin_rlp(account_data);
 		let account_key_hash = H256::from_slice(&account_key);

 		let account_db = AccountDB::from_hash(db, account_key_hash);

 		let fat_rlp = try!(account.to_fat_rlp(&account_db, &mut used_code));
-		let compressed_rlp = UntrustedRlp::new(&fat_rlp).compress(RlpType::Snapshot).to_vec();
-		try!(chunker.push(account_key, compressed_rlp));
+		try!(chunker.push(account_key, fat_rlp));
 	}

 	if chunker.cur_size != 0 {
@@ -507,8 +510,7 @@ fn rebuild_accounts(
 		let account_rlp = UntrustedRlp::new(account_pair);

 		let hash: H256 = try!(account_rlp.val_at(0));
-		let decompressed = try!(account_rlp.at(1)).decompress(RlpType::Snapshot);
-		let fat_rlp = UntrustedRlp::new(&decompressed[..]);
+		let fat_rlp = try!(account_rlp.at(1));

 		let thin_rlp = {
 			let mut acct_db = AccountDBMut::from_hash(db, hash);
@@ -569,6 +571,7 @@ impl BlockRebuilder {
 	pub fn feed(&mut self, chunk: &[u8], engine: &Engine) -> Result<u64, ::error::Error> {
 		use basic_types::Seal::With;
 		use util::U256;
+		use util::triehash::ordered_trie_root;

 		let rlp = UntrustedRlp::new(chunk);
 		let item_count = rlp.item_count();
@@ -585,7 +588,11 @@ impl BlockRebuilder {
 			let abridged_rlp = try!(pair.at(0)).as_raw().to_owned();
 			let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
 			let receipts: Vec<::receipt::Receipt> = try!(pair.val_at(1));
-			let block = try!(abridged_block.to_block(parent_hash, cur_number));
+			let receipts_root = ordered_trie_root(
+				try!(pair.at(1)).iter().map(|r| r.as_raw().to_owned())
+			);
+
+			let block = try!(abridged_block.to_block(parent_hash, cur_number, receipts_root));
 			let block_bytes = block.rlp_bytes(With);

 			if self.rng.gen::<f32>() <= POW_VERIFY_RATE {
@@ -27,7 +27,7 @@ use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, Sna
 use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};

 use blockchain::BlockChain;
-use client::Client;
+use client::{BlockChainClient, Client};
 use engines::Engine;
 use error::Error;
 use ids::BlockID;
@@ -345,7 +345,17 @@ impl Service {
 		let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress);

 		self.taking_snapshot.store(false, Ordering::SeqCst);
-		try!(res);
+		if let Err(e) = res {
+			if client.chain_info().best_block_number >= num + ::client::HISTORY {
+				// "Cancelled" is mincing words a bit -- what really happened
+				// is that the state we were snapshotting got pruned out
+				// before we could finish.
+				info!("Cancelled prematurely-started periodic snapshot.");
+				return Ok(())
+			} else {
+				return Err(e);
+			}
+		}

 		info!("Finished taking snapshot at #{}", num);

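The recovery path treats a failed snapshot as benign only when the target block has since fallen out of the pruning window, i.e. the chain has advanced at least HISTORY blocks past it; otherwise the error is real and is propagated. A minimal sketch of that decision, with the HISTORY value taken from the client change earlier in this diff:

const HISTORY: u64 = 1200;

// Returns true when the state for `snapshot_block` may already have been pruned away.
fn likely_pruned(best_block: u64, snapshot_block: u64) -> bool {
    best_block >= snapshot_block + HISTORY
}

fn main() {
    assert!(likely_pruned(11_200, 10_000));  // chain moved 1200+ blocks on: state is gone
    assert!(!likely_pruned(10_100, 10_000)); // still within the pruning window: real error
}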
@@ -52,8 +52,9 @@ impl StateProducer {
 	// modify existing accounts.
 	let mut accounts_to_modify: Vec<_> = {
 		let trie = TrieDB::new(&*db, &self.state_root).unwrap();
-		let temp = trie.iter() // binding required due to complicated lifetime stuff
+		let temp = trie.iter().unwrap() // binding required due to complicated lifetime stuff
 			.filter(|_| rng.gen::<f32>() < ACCOUNT_CHURN)
+			.map(Result::unwrap)
 			.map(|(k, v)| (H256::from_slice(&k), v.to_owned()))
 			.collect();

@@ -191,10 +191,16 @@ impl State {
 		}))
 	}

-	/// Mutate storage of account `a` so that it is `value` for `key`.
+	/// Get the code of account `a`.
 	pub fn code(&self, a: &Address) -> Option<Bytes> {
 		self.ensure_cached(a, true,
-			|a| a.as_ref().map_or(None, |a|a.code().map(|x|x.to_vec())))
+			|a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec())))
+	}
+
+	/// Get the code size of account `a`.
+	pub fn code_size(&self, a: &Address) -> Option<usize> {
+		self.ensure_cached(a, true,
+			|a| a.as_ref().map_or(None, |a| a.code().map(|x| x.len())))
 	}

 	/// Add `incr` to the balance of account `a`.
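The two accessors differ only in what they project out of the cached account: one clones the code bytes, the other reports just the length, which is what the new `extcodesize` externality needs. Note that `opt.map_or(None, f)` is equivalent to `opt.and_then(f)`. A small sketch with a toy account type (the type and helper names are assumptions for illustration):

struct Account {
    code: Option<Vec<u8>>,
}

// Mirrors the two accessors: one clones the code, one only reports its length.
fn code(acc: &Option<Account>) -> Option<Vec<u8>> {
    acc.as_ref().and_then(|a| a.code.clone())
}

fn code_size(acc: &Option<Account>) -> Option<usize> {
    acc.as_ref().and_then(|a| a.code.as_ref().map(|c| c.len()))
}

fn main() {
    let acc = Some(Account { code: Some(vec![0x60, 0x00]) });
    assert_eq!(code_size(&acc), Some(2));
    assert_eq!(code(&acc).map(|c| c.len()), Some(2));
    assert_eq!(code_size(&None), None);
}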
@@ -420,10 +426,27 @@ impl fmt::Debug for State {

 impl Clone for State {
 	fn clone(&self) -> State {
+		let cache = {
+			let mut cache = HashMap::new();
+			for (key, val) in self.cache.borrow().iter() {
+				let key = key.clone();
+				match *val {
+					Some(ref acc) if acc.is_dirty() => {
+						cache.insert(key, Some(acc.clone()));
+					},
+					None => {
+						cache.insert(key, None);
+					},
+					_ => {},
+				}
+			}
+			cache
+		};
+
 		State {
 			db: self.db.boxed_clone(),
 			root: self.root.clone(),
-			cache: RefCell::new(self.cache.borrow().clone()),
+			cache: RefCell::new(cache),
 			snapshots: RefCell::new(self.snapshots.borrow().clone()),
 			account_start_nonce: self.account_start_nonce.clone(),
 			factories: self.factories.clone(),
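Cloning a `State` no longer copies clean cache entries: only dirty accounts (and the `None` markers the diff also keeps) survive, since everything else can be re-read from the backing database. A self-contained sketch of the same filtering, with a toy cache entry in place of the account type:

use std::collections::HashMap;

#[derive(Clone)]
struct Entry {
    value: u64,
    dirty: bool,
}

// Keep only entries that still need to be flushed; drop clean ones,
// which can always be re-read from the backing store.
fn clone_dirty(cache: &HashMap<String, Option<Entry>>) -> HashMap<String, Option<Entry>> {
    let mut out = HashMap::new();
    for (key, val) in cache.iter() {
        match *val {
            Some(ref e) if e.dirty => { out.insert(key.clone(), Some(e.clone())); }
            None => { out.insert(key.clone(), None); }
            _ => {}
        }
    }
    out
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert("a".to_string(), Some(Entry { value: 1, dirty: true }));
    cache.insert("b".to_string(), Some(Entry { value: 2, dirty: false }));
    cache.insert("c".to_string(), None);
    let cloned = clone_dirty(&cache);
    assert_eq!(cloned.len(), 2);
    assert!(!cloned.contains_key("b")); // the clean entry was not copied
}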
@@ -142,7 +142,8 @@ fn returns_logs() {
 		to_block: BlockID::Latest,
 		address: None,
 		topics: vec![],
-	}, None);
+		limit: None,
+	});
 	assert_eq!(logs.len(), 0);
 }

@@ -156,7 +157,8 @@ fn returns_logs_with_limit() {
 		to_block: BlockID::Latest,
 		address: None,
 		topics: vec![],
-	}, Some(2));
+		limit: Some(2),
+	});
 	assert_eq!(logs.len(), 0);
 }

@ -15,57 +15,14 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
//! Traces config.
|
//! Traces config.
|
||||||
use std::str::FromStr;
|
|
||||||
use bloomchain::Config as BloomConfig;
|
use bloomchain::Config as BloomConfig;
|
||||||
use trace::Error;
|
|
||||||
|
|
||||||
/// 3-value enum.
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
|
||||||
pub enum Switch {
|
|
||||||
/// True.
|
|
||||||
On,
|
|
||||||
/// False.
|
|
||||||
Off,
|
|
||||||
/// Auto.
|
|
||||||
Auto,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Switch {
|
|
||||||
fn default() -> Self {
|
|
||||||
Switch::Auto
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromStr for Switch {
|
|
||||||
type Err = String;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
match s {
|
|
||||||
"on" => Ok(Switch::On),
|
|
||||||
"off" => Ok(Switch::Off),
|
|
||||||
"auto" => Ok(Switch::Auto),
|
|
||||||
other => Err(format!("Invalid switch value: {}", other))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Switch {
|
|
||||||
/// Tries to turn old switch to new value.
|
|
||||||
pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
|
|
||||||
match (*self, to) {
|
|
||||||
(Switch::On, Switch::On) | (Switch::On, Switch::Auto) | (Switch::Auto, Switch::On) => Ok(true),
|
|
||||||
(Switch::Off, Switch::On) => Err(Error::ResyncRequired),
|
|
||||||
_ => Ok(false),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Traces config.
|
/// Traces config.
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
/// Indicates if tracing should be enabled or not.
|
/// Indicates if tracing should be enabled or not.
|
||||||
/// If it's None, it will be automatically configured.
|
/// If it's None, it will be automatically configured.
|
||||||
pub enabled: Switch,
|
pub enabled: bool,
|
||||||
/// Traces blooms configuration.
|
/// Traces blooms configuration.
|
||||||
pub blooms: BloomConfig,
|
pub blooms: BloomConfig,
|
||||||
/// Preferef cache-size.
|
/// Preferef cache-size.
|
||||||
@ -77,7 +34,7 @@ pub struct Config {
|
|||||||
impl Default for Config {
|
impl Default for Config {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Config {
|
Config {
|
||||||
enabled: Switch::default(),
|
enabled: false,
|
||||||
blooms: BloomConfig {
|
blooms: BloomConfig {
|
||||||
levels: 3,
|
levels: 3,
|
||||||
elements_per_index: 16,
|
elements_per_index: 16,
|
||||||
@ -87,20 +44,3 @@ impl Default for Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::Switch;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_switch_parsing() {
|
|
||||||
assert_eq!(Switch::On, "on".parse().unwrap());
|
|
||||||
assert_eq!(Switch::Off, "off".parse().unwrap());
|
|
||||||
assert_eq!(Switch::Auto, "auto".parse().unwrap());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_switch_default() {
|
|
||||||
assert_eq!(Switch::default(), Switch::Auto);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
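With the 3-value Switch gone from this module, tracing is now configured with a plain bool. A minimal sketch of the new call pattern, mirroring the test changes elsewhere in this diff (assumes the trace Config from the hunk above is in scope):

let mut config = Config::default();   // enabled now defaults to false
config.enabled = true;                // previously: config.enabled = Switch::On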
@@ -22,7 +22,7 @@ use bloomchain::{Number, Config as BloomConfig};
 use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
 use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf};
 use header::BlockNumber;
-use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
+use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras};
 use db::{self, Key, Writable, Readable, CacheUpdatePolicy};
 use blooms;
 use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
@@ -126,38 +126,20 @@ impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {

 impl<T> TraceDB<T> where T: DatabaseExtras {
 /// Creates new instance of `TraceDB`.
-pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Result<Self, Error> {
-// check if in previously tracing was enabled
-let old_tracing = match tracesdb.get(db::COL_TRACE, b"enabled").unwrap() {
-Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
-Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
-Some(_) => { panic!("tracesdb is corrupted") },
-None => Switch::Auto,
-};
-
-let enabled = try!(old_tracing.turn_to(config.enabled));
-
-let encoded_tracing = match enabled {
-true => [0x1],
-false => [0x0]
-};
-
+pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Self {
 let mut batch = DBTransaction::new(&tracesdb);
-batch.put(db::COL_TRACE, b"enabled", &encoded_tracing);
 batch.put(db::COL_TRACE, b"version", TRACE_DB_VER);
 tracesdb.write(batch).unwrap();

-let db = TraceDB {
+TraceDB {
 traces: RwLock::new(HashMap::new()),
 blooms: RwLock::new(HashMap::new()),
 cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
 tracesdb: tracesdb,
 bloom_config: config.blooms,
-enabled: enabled,
+enabled: config.enabled,
 extras: extras,
-};
-
-Ok(db)
+}
 }

 fn cache_size(&self) -> usize {
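TraceDB::new no longer inspects the old "enabled" marker in the database and no longer returns a Result. A sketch of the new call site, assuming `config`, `tracesdb: Arc<Database>` and `extras: Arc<T>` are already in scope:

// Construction cannot fail any more; the flag is taken straight from Config.
let enabled = config.enabled;
let tracedb = TraceDB::new(config, tracesdb.clone(), extras.clone());
assert_eq!(tracedb.tracing_enabled(), enabled);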
@@ -419,7 +401,7 @@ mod tests {
 use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
 use devtools::RandomTempPath;
 use header::BlockNumber;
-use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
+use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
 use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
 use trace::trace::{Call, Action, Res};
 use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
@@ -474,22 +456,10 @@ mod tests {
 let mut config = Config::default();

 // set autotracing
-config.enabled = Switch::Auto;
+config.enabled = false;

 {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-assert_eq!(tracedb.tracing_enabled(), false);
-}
-
-{
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-assert_eq!(tracedb.tracing_enabled(), false);
-}
-
-config.enabled = Switch::Off;
-
-{
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
 assert_eq!(tracedb.tracing_enabled(), false);
 }
 }
@@ -501,50 +471,12 @@ mod tests {
 let mut config = Config::default();

 // set tracing on
-config.enabled = Switch::On;
+config.enabled = true;

 {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
 assert_eq!(tracedb.tracing_enabled(), true);
 }
-
-{
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-assert_eq!(tracedb.tracing_enabled(), true);
-}
-
-config.enabled = Switch::Auto;
-
-{
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-assert_eq!(tracedb.tracing_enabled(), true);
-}
-
-config.enabled = Switch::Off;
-
-{
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-assert_eq!(tracedb.tracing_enabled(), false);
-}
-}
-
-#[test]
-#[should_panic]
-fn test_invalid_reopening_db() {
-let temp = RandomTempPath::new();
-let db = new_db(temp.as_str());
-let mut config = Config::default();
-
-// set tracing on
-config.enabled = Switch::Off;
-
-{
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-assert_eq!(tracedb.tracing_enabled(), true);
-}
-
-config.enabled = Switch::On;
-TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
 }

 fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
@@ -595,7 +527,7 @@ mod tests {
 let temp = RandomTempPath::new();
 let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap());
 let mut config = Config::default();
-config.enabled = Switch::On;
+config.enabled = true;
 let block_0 = H256::from(0xa1);
 let block_1 = H256::from(0xa2);
 let tx_0 = H256::from(0xff);
@@ -607,7 +539,7 @@ mod tests {
 extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
 extras.transaction_hashes.insert(1, vec![tx_1.clone()]);

-let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
+let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras));

 // import block 0
 let request = create_simple_import_request(0, block_0.clone());
@@ -679,10 +611,10 @@ mod tests {
 extras.transaction_hashes.insert(0, vec![tx_0.clone()]);

 // set tracing on
-config.enabled = Switch::On;
+config.enabled = true;

 {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone()));

 // import block 0
 let request = create_simple_import_request(0, block_0.clone());
@@ -692,7 +624,7 @@ mod tests {
 }

 {
-let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap();
+let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras));
 let traces = tracedb.transaction_traces(0, 0);
 assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]);
 }
@@ -26,7 +26,7 @@ mod noop_tracer;

 pub use types::trace_types::{filter, flat, localized, trace};
 pub use types::trace_types::error::Error as TraceError;
-pub use self::config::{Config, Switch};
+pub use self::config::Config;
 pub use self::db::TraceDB;
 pub use self::error::Error;
 pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff};
@@ -41,6 +41,12 @@ pub struct Filter {
 /// If None, match all.
 /// If specified, log must contain one of these topics.
 pub topics: Vec<Option<Vec<H256>>>,
+
+/// Logs limit
+///
+/// If None, return all logs
+/// If specified, should only return *last* `n` logs.
+pub limit: Option<usize>,
 }

 impl Clone for Filter {
@@ -59,7 +65,8 @@ impl Clone for Filter {
 from_block: self.from_block.clone(),
 to_block: self.to_block.clone(),
 address: self.address.clone(),
-topics: topics[..].to_vec()
+topics: topics[..].to_vec(),
+limit: self.limit,
 }
 }
 }
@@ -117,6 +124,7 @@ mod tests {
 to_block: BlockID::Latest,
 address: None,
 topics: vec![None, None, None, None],
+limit: None,
 };

 let possibilities = none_filter.bloom_possibilities();
@@ -136,7 +144,8 @@ mod tests {
 None,
 None,
 None,
-]
+],
+limit: None,
 };

 let possibilities = filter.bloom_possibilities();
@@ -154,7 +163,8 @@ mod tests {
 Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]),
 None,
 None,
-]
+],
+limit: None,
 };

 let possibilities = filter.bloom_possibilities();
@@ -181,7 +191,8 @@ mod tests {
 ]),
 Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]),
 None
-]
+],
+limit: None,
 };

 // number of possibilites should be equal 2 * 2 * 2 * 1 = 8
@@ -201,7 +212,8 @@ mod tests {
 Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into()]),
 None,
 None,
-]
+],
+limit: None,
 };

 let entry0 = LogEntry {
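The log Filter now carries its result limit itself instead of the limit being passed next to the filter. A sketch of the new construction, assuming a `client` whose logs() takes the filter alone, as in the returns_logs tests earlier in this diff (the from_block value is illustrative):

let filter = Filter {
	from_block: BlockID::Earliest,
	to_block: BlockID::Latest,
	address: None,
	topics: vec![],
	limit: Some(2), // None returns all matching logs; Some(n) keeps only the last n
};
let logs = client.logs(filter);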
@@ -21,7 +21,7 @@ use std::cell::*;
 use rlp::*;
 use util::sha3::Hashable;
 use util::{H256, Address, U256, Bytes};
-use ethkey::{Signature, sign, Secret, recover, public_to_address, Error as EthkeyError};
+use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError};
 use error::*;
 use evm::Schedule;
 use header::BlockNumber;
@@ -305,13 +305,18 @@ impl SignedTransaction {
 match sender {
 Some(s) => Ok(s),
 None => {
-let s = public_to_address(&try!(recover(&self.signature(), &self.unsigned.hash())));
+let s = public_to_address(&try!(self.public_key()));
 self.sender.set(Some(s));
 Ok(s)
 }
 }
 }

+/// Returns the public key of the sender.
+pub fn public_key(&self) -> Result<Public, Error> {
+Ok(try!(recover(&self.signature(), &self.unsigned.hash())))
+}
+
 /// Do basic validation, checking for valid signature and minimum gas,
 // TODO: consider use in block validation.
 #[cfg(test)]
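Sender recovery is now split out into a public helper on SignedTransaction. A sketch of using it, assuming `tx` is a valid SignedTransaction and the caching accessor shown in the hunk above is its sender() method:

// Recover the signer's public key once, then derive the address from it;
// this mirrors what the accessor above does before caching the result.
let public = try!(tx.public_key());
let sender = public_to_address(&public);
assert_eq!(sender, try!(tx.sender()));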
@@ -215,7 +215,7 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> {
 fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: &H256) -> Result<(), Error> {
 let block = UntrustedRlp::new(block);
 let tx = try!(block.at(1));
-let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here
+let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here
 if expected_root != transactions_root {
 return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() })))
 }
@@ -422,7 +422,7 @@ mod tests {
 let mut uncles_rlp = RlpStream::new();
 uncles_rlp.append(&good_uncles);
 let good_uncles_hash = uncles_rlp.as_raw().sha3();
-let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::<SignedTransaction>(t).to_vec()).collect());
+let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::<SignedTransaction>(t).to_vec()));

 let mut parent = good.clone();
 parent.set_number(9);
@@ -22,6 +22,7 @@ extern crate crypto as rcrypto;
 extern crate secp256k1;
 extern crate ethkey;

+use std::fmt;
 use tiny_keccak::Keccak;
 use rcrypto::pbkdf2::pbkdf2;
 use rcrypto::scrypt::{scrypt, ScryptParams};
@@ -39,6 +40,17 @@ pub enum Error {
 InvalidMessage,
 }

+impl fmt::Display for Error {
+fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+let s = match *self {
+Error::Secp(ref err) => err.to_string(),
+Error::InvalidMessage => "Invalid message".into(),
+};
+
+write!(f, "{}", s)
+}
+}
+
 impl From<SecpError> for Error {
 fn from(e: SecpError) -> Self {
 Error::Secp(e)
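With Display implemented for the crypto Error, it can be formatted directly rather than matched by hand; a small sketch (the describe helper is illustrative):

// Uses the Display impl added above; Secp errors defer to their own to_string().
fn describe(err: &Error) -> String {
	format!("crypto error: {}", err)
}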
@@ -1,7 +1,7 @@
 [package]
 name = "ethkey"
 version = "0.2.0"
-authors = ["debris <marek.kotewicz@gmail.com>"]
+authors = ["Ethcore <admin@ethcore.io>"]

 [dependencies]
 rand = "0.3.14"
@@ -16,7 +16,7 @@

 use ethkey::{KeyPair, sign, Address, Secret, Signature, Message};
 use {json, Error, crypto};
-use crypto::Keccak256;
+use crypto::{Keccak256};
 use random::Random;
 use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf};

@@ -170,6 +170,11 @@ impl SafeAccount {
 sign(&secret, message).map_err(From::from)
 }

+pub fn decrypt(&self, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
+let secret = try!(self.crypto.secret(password));
+crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from)
+}
+
 pub fn change_password(&self, old_password: &str, new_password: &str, iterations: u32) -> Result<Self, Error> {
 let secret = try!(self.crypto.secret(old_password));
 let result = SafeAccount {
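A sketch of the new ECIES decryption path on a stored account, assuming `account` is a SafeAccount and `password`, `shared_mac` and `message` come from the caller:

// Unlocks the account's secret with the password, then ECIES-decrypts `message`;
// a wrong password or a bad MAC surfaces as an Error.
let plain: Vec<u8> = try!(account.decrypt(password, shared_mac, message));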
@@ -17,6 +17,7 @@
 use std::fmt;
 use std::io::Error as IoError;
 use ethkey::Error as EthKeyError;
+use crypto::Error as EthCryptoError;

 #[derive(Debug)]
 pub enum Error {
@@ -28,6 +29,7 @@ pub enum Error {
 InvalidKeyFile(String),
 CreationFailed,
 EthKey(EthKeyError),
+EthCrypto(EthCryptoError),
 Custom(String),
 }

@@ -42,6 +44,7 @@ impl fmt::Display for Error {
 Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason),
 Error::CreationFailed => "Account creation failed".into(),
 Error::EthKey(ref err) => err.to_string(),
+Error::EthCrypto(ref err) => err.to_string(),
 Error::Custom(ref s) => s.clone(),
 };

@@ -60,3 +63,9 @@ impl From<EthKeyError> for Error {
 Error::EthKey(err)
 }
 }
+
+impl From<EthCryptoError> for Error {
+fn from(err: EthCryptoError) -> Self {
+Error::EthCrypto(err)
+}
+}
@@ -144,6 +144,11 @@ impl SecretStore for EthStore {
 account.sign(password, message)
 }

+fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
+let account = try!(self.get(account));
+account.decrypt(password, shared_mac, message)
+}
+
 fn uuid(&self, address: &Address) -> Result<UUID, Error> {
 let account = try!(self.get(address));
 Ok(account.id.into())
@@ -20,33 +20,24 @@ use json::UUID;

 pub trait SecretStore: Send + Sync {
 fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error>;

 fn import_presale(&self, json: &[u8], password: &str) -> Result<Address, Error>;

 fn import_wallet(&self, json: &[u8], password: &str) -> Result<Address, Error>;

-fn accounts(&self) -> Result<Vec<Address>, Error>;
-
 fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>;

 fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>;

 fn sign(&self, account: &Address, password: &str, message: &Message) -> Result<Signature, Error>;
+fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error>;

+fn accounts(&self) -> Result<Vec<Address>, Error>;
 fn uuid(&self, account: &Address) -> Result<UUID, Error>;

 fn name(&self, account: &Address) -> Result<String, Error>;

 fn meta(&self, account: &Address) -> Result<String, Error>;

 fn set_name(&self, address: &Address, name: String) -> Result<(), Error>;

 fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error>;

 fn local_path(&self) -> String;

 fn list_geth_accounts(&self, testnet: bool) -> Vec<Address>;

 fn import_geth_accounts(&self, desired: Vec<Address>, testnet: bool) -> Result<Vec<Address>, Error>;
 }

@@ -26,15 +26,16 @@ use io::{PanicHandler, ForwardPanic};
 use util::{ToPretty, Uint};
 use rlp::PayloadInfo;
 use ethcore::service::ClientService;
-use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockID};
 use ethcore::error::ImportError;
 use ethcore::miner::Miner;
 use cache::CacheConfig;
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
 use informant::{Informant, MillisecondDuration};
 use io_handler::ImportIoHandler;
-use params::{SpecType, Pruning};
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
+use user_defaults::UserDefaults;
 use fdlimit;

 #[derive(Debug, PartialEq)]
@@ -113,29 +114,44 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 // Setup panic handler
 let panic_handler = PanicHandler::new_in_arc();

+// Setup logging
+let _logger = setup_log(&cmd.logger_config);
+
+// create dirs used by parity
+try!(cmd.dirs.create_dirs());
+
 // load spec file
 let spec = try!(cmd.spec.spec());

 // load genesis hash
 let genesis_hash = spec.genesis_header().hash();

-// Setup logging
-let _logger = setup_log(&cmd.logger_config);
+// database paths
+let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());

+// user defaults path
+let user_defaults_path = db_dirs.user_defaults_path();
+
+// load user defaults
+let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+// check if tracing is on
+let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
 fdlimit::raise_fd_limit();

 // select pruning algorithm
-let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+let algorithm = cmd.pruning.to_algorithm(&user_defaults);

 // prepare client and snapshot paths.
-let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+let client_path = db_dirs.client_path(algorithm);
+let snapshot_path = db_dirs.snapshot_path();

 // execute upgrades
-try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));

 // prepare client config
-let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref());
+let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);

 // build client
 let service = try!(ClientService::start(
@@ -220,6 +236,12 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 }
 }
 client.flush_queue();
+
+// save user defaults
+user_defaults.pruning = algorithm;
+user_defaults.tracing = tracing;
+try!(user_defaults.save(&user_defaults_path));
+
 let report = client.report();

 let ms = timer.elapsed().as_milliseconds();
@@ -238,6 +260,12 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 // Setup panic handler
 let panic_handler = PanicHandler::new_in_arc();

+// Setup logging
+let _logger = setup_log(&cmd.logger_config);
+
+// create dirs used by parity
+try!(cmd.dirs.create_dirs());
+
 let format = cmd.format.unwrap_or_default();

 // load spec file
@@ -246,23 +274,32 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 // load genesis hash
 let genesis_hash = spec.genesis_header().hash();

-// Setup logging
-let _logger = setup_log(&cmd.logger_config);
+// database paths
+let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());

+// user defaults path
+let user_defaults_path = db_dirs.user_defaults_path();
+
+// load user defaults
+let user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+// check if tracing is on
+let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
 fdlimit::raise_fd_limit();

 // select pruning algorithm
-let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+let algorithm = cmd.pruning.to_algorithm(&user_defaults);

 // prepare client and snapshot paths.
-let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+let client_path = db_dirs.client_path(algorithm);
+let snapshot_path = db_dirs.snapshot_path();

 // execute upgrades
-try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));

 // prepare client config
-let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
+let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);

 let service = try!(ClientService::start(
 client_config,
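The import and export paths now resolve tracing and pruning from per-chain user defaults instead of probing the directory layout. A condensed sketch of the flow introduced above (names as in the diff):

let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
let user_defaults_path = db_dirs.user_defaults_path();
let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));

// Reconcile the CLI switches with what this chain directory was last run with.
let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
let algorithm = cmd.pruning.to_algorithm(&user_defaults);

// ... import runs here ...

// Persist the effective settings so the next run can default to them.
user_defaults.pruning = algorithm;
user_defaults.tracing = tracing;
try!(user_defaults.save(&user_defaults_path));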
@@ -32,6 +32,8 @@ usage! {
 cmd_snapshot: bool,
 cmd_restore: bool,
 cmd_ui: bool,
+cmd_tools: bool,
+cmd_hash: bool,

 // Arguments
 arg_pid_file: String,
@@ -441,6 +443,8 @@ mod tests {
 cmd_snapshot: false,
 cmd_restore: false,
 cmd_ui: false,
+cmd_tools: false,
+cmd_hash: false,

 // Arguments
 arg_pid_file: "".into(),
@@ -14,6 +14,7 @@ Usage:
 parity signer new-token [options]
 parity snapshot <file> [options]
 parity restore [ <file> ] [options]
+parity tools hash <file>

 Operating Options:
 --mode MODE Set the operating mode. MODE can be one of:
@@ -283,4 +284,3 @@ Miscellaneous Options:
 --no-color Don't use terminal color codes in output. (default: {flag_no_color})
 -v --version Show information about version.
 -h --help Show this screen.

@@ -51,6 +51,7 @@ pub enum Cmd {
 Blockchain(BlockchainCmd),
 SignerToken(String),
 Snapshot(SnapshotCommand),
+Hash(Option<String>),
 }

 #[derive(Debug, PartialEq)]
@@ -94,8 +95,10 @@ impl Configuration {

 let cmd = if self.args.flag_version {
 Cmd::Version
-} else if self.args.cmd_signer {
+} else if self.args.cmd_signer && self.args.cmd_new_token {
 Cmd::SignerToken(dirs.signer)
+} else if self.args.cmd_tools && self.args.cmd_hash {
+Cmd::Hash(self.args.arg_file)
 } else if self.args.cmd_account {
 let account_cmd = if self.args.cmd_new {
 let new_acc = NewAccount {
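The new Cmd::Hash arm is reached through the tools subcommand added to the usage text above; an illustrative invocation (file name made up) would be `parity tools hash ./some-file`, which prints the file's hash via the print_hash_of helper added later in this diff.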
@@ -52,32 +52,13 @@ impl Directories {
 Ok(())
 }

-/// Get the chain's root path.
-pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-let mut dir = Path::new(&self.db).to_path_buf();
-dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
-dir
-}
-
-/// Get the root path for database
-pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
-let mut dir = self.chain_path(genesis_hash, fork_name);
-dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
-dir
-}
-
-/// Get the path for the databases given the genesis_hash and information on the databases.
-pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
-let mut dir = self.db_version_path(genesis_hash, fork_name, pruning);
-dir.push("db");
-dir
-}
-
-/// Get the path for the snapshot directory given the genesis hash and fork name.
-pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-let mut dir = self.chain_path(genesis_hash, fork_name);
-dir.push("snapshot");
-dir
+/// Database paths.
+pub fn database(&self, genesis_hash: H256, fork_name: Option<String>) -> DatabaseDirectories {
+DatabaseDirectories {
+path: self.db.clone(),
+genesis_hash: genesis_hash,
+fork_name: fork_name,
+}
 }

 /// Get the ipc sockets path
@@ -88,6 +69,49 @@ impl Directories {
 }
 }

+#[derive(Debug, PartialEq)]
+pub struct DatabaseDirectories {
+pub path: String,
+pub genesis_hash: H256,
+pub fork_name: Option<String>,
+}
+
+impl DatabaseDirectories {
+fn fork_path(&self) -> PathBuf {
+let mut dir = Path::new(&self.path).to_path_buf();
+dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
+dir
+}
+
+/// Get the root path for database
+pub fn version_path(&self, pruning: Algorithm) -> PathBuf {
+let mut dir = self.fork_path();
+dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
+dir
+}
+
+/// Get the path for the databases given the genesis_hash and information on the databases.
+pub fn client_path(&self, pruning: Algorithm) -> PathBuf {
+let mut dir = self.version_path(pruning);
+dir.push("db");
+dir
+}
+
+/// Get user defaults path
+pub fn user_defaults_path(&self) -> PathBuf {
+let mut dir = self.fork_path();
+dir.push("user_defaults");
+dir
+}
+
+/// Get the path for the snapshot directory given the genesis hash and fork name.
+pub fn snapshot_path(&self) -> PathBuf {
+let mut dir = self.fork_path();
+dir.push("snapshot");
+dir
+}
+}
+
 #[cfg(test)]
 mod tests {
 use super::Directories;
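A sketch of how callers are expected to use the new DatabaseDirectories helper (variable names illustrative, paths follow the push() calls above):

// `dirs` is a Directories value; genesis hash and fork name come from the loaded Spec.
let db_dirs = dirs.database(genesis_hash, spec.fork_name.clone());

let client_path = db_dirs.client_path(Algorithm::default()); // <db>/<genesis+fork>/v<ver>-sec-<pruning>/db
let snapshot_path = db_dirs.snapshot_path();                  // <db>/<genesis+fork>/snapshot
let user_defaults_path = db_dirs.user_defaults_path();        // <db>/<genesis+fork>/user_defaults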
@@ -19,13 +19,12 @@ use std::io::{Write, Read, BufReader, BufRead};
 use std::time::Duration;
 use std::path::Path;
 use std::fs::File;
-use util::{clean_0x, U256, Uint, Address, path, H256, CompactionProfile};
+use util::{clean_0x, U256, Uint, Address, path, CompactionProfile};
 use util::journaldb::Algorithm;
-use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig};
+use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig};
 use ethcore::miner::PendingSet;
 use cache::CacheConfig;
-use dir::Directories;
-use params::Pruning;
+use dir::DatabaseDirectories;
 use upgrade::upgrade;
 use migration::migrate;
 use ethsync::is_valid_node_url;
@@ -190,16 +189,13 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration {
 #[cfg_attr(feature = "dev", allow(too_many_arguments))]
 pub fn to_client_config(
 cache_config: &CacheConfig,
-dirs: &Directories,
-genesis_hash: H256,
 mode: Mode,
-tracing: Switch,
-pruning: Pruning,
+tracing: bool,
 compaction: DatabaseCompactionProfile,
 wal: bool,
 vm_type: VMType,
 name: String,
-fork_name: Option<&String>,
+pruning: Algorithm,
 ) -> ClientConfig {
 let mut client_config = ClientConfig::default();

@@ -221,7 +217,7 @@ pub fn to_client_config(

 client_config.mode = mode;
 client_config.tracing.enabled = tracing;
-client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name);
+client_config.pruning = pruning;
 client_config.db_compaction = compaction;
 client_config.db_wal = wal;
 client_config.vm_type = vm_type;
@@ -230,14 +226,12 @@ pub fn to_client_config(
 }

 pub fn execute_upgrades(
-dirs: &Directories,
-genesis_hash: H256,
-fork_name: Option<&String>,
+dirs: &DatabaseDirectories,
 pruning: Algorithm,
 compaction_profile: CompactionProfile
 ) -> Result<(), String> {

-match upgrade(Some(&dirs.db)) {
+match upgrade(Some(&dirs.path)) {
 Ok(upgrades_applied) if upgrades_applied > 0 => {
 debug!("Executed {} upgrade scripts - ok", upgrades_applied);
 },
@@ -247,7 +241,7 @@ pub fn execute_upgrades(
 _ => {},
 }

-let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning);
+let client_path = dirs.version_path(pruning);
 migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
 }

@@ -39,6 +39,8 @@ extern crate semver;
 extern crate ethcore_io as io;
 extern crate ethcore_ipc as ipc;
 extern crate ethcore_ipc_nano as nanoipc;
+extern crate serde;
+extern crate serde_json;
 extern crate rlp;

 extern crate json_ipc_server as jsonipc;
@@ -106,15 +108,29 @@ mod run;
 mod sync;
 #[cfg(feature="ipc")]
 mod boot;
+mod user_defaults;

 #[cfg(feature="stratum")]
 mod stratum;

 use std::{process, env};
+use std::io::BufReader;
+use std::fs::File;
+use util::sha3::sha3;
 use cli::Args;
 use configuration::{Cmd, Configuration};
 use deprecated::find_deprecated;

+fn print_hash_of(maybe_file: Option<String>) -> Result<String, String> {
+if let Some(file) = maybe_file {
+let mut f = BufReader::new(try!(File::open(&file).map_err(|_| "Unable to open file".to_owned())));
+let hash = try!(sha3(&mut f).map_err(|_| "Unable to read from file".to_owned()));
+Ok(hash.hex())
+} else {
+Err("Streaming from standard input not yet supported. Specify a file.".to_owned())
+}
+}
+
 fn execute(command: Cmd) -> Result<String, String> {
 match command {
 Cmd::Run(run_cmd) => {
@@ -122,6 +138,7 @@ fn execute(command: Cmd) -> Result<String, String> {
 Ok("".into())
 },
 Cmd::Version => Ok(Args::print_version()),
+Cmd::Hash(maybe_file) => print_hash_of(maybe_file),
 Cmd::Account(account_cmd) => account::execute(account_cmd),
 Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd),
 Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd),
103 parity/params.rs
@@ -14,15 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::str::FromStr;
-use std::fs;
+use std::{str, fs};
 use std::time::Duration;
-use util::{H256, Address, U256, version_data};
+use util::{Address, U256, version_data};
 use util::journaldb::Algorithm;
 use ethcore::spec::Spec;
 use ethcore::ethereum;
 use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
-use dir::Directories;
+use user_defaults::UserDefaults;

 #[derive(Debug, PartialEq)]
 pub enum SpecType {
@@ -39,7 +38,7 @@ impl Default for SpecType {
 }
 }

-impl FromStr for SpecType {
+impl str::FromStr for SpecType {
 type Err = String;

 fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -81,7 +80,7 @@ impl Default for Pruning {
 }
 }

-impl FromStr for Pruning {
+impl str::FromStr for Pruning {
 type Err = String;

 fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -93,24 +92,12 @@ impl FromStr for Pruning {
 }

 impl Pruning {
-pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
+pub fn to_algorithm(&self, user_defaults: &UserDefaults) -> Algorithm {
 match *self {
 Pruning::Specific(algo) => algo,
-Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name),
+Pruning::Auto => user_defaults.pruning,
 }
 }
-
-fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
-let mut algo_types = Algorithm::all_types();
-// if all dbs have the same modification time, the last element is the default one
-algo_types.push(Algorithm::default());
-
-algo_types.into_iter().max_by_key(|i| {
-let mut client_path = dirs.client_path(genesis_hash, fork_name, *i);
-client_path.push("CURRENT");
-fs::metadata(&client_path).and_then(|m| m.modified()).ok()
-}).unwrap()
-}
 }

 #[derive(Debug, PartialEq)]
@@ -128,7 +115,7 @@ impl Default for ResealPolicy {
 }
 }

-impl FromStr for ResealPolicy {
+impl str::FromStr for ResealPolicy {
 type Err = String;

 fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -223,10 +210,50 @@ impl Default for MinerExtras {
 }
 }

+/// 3-value enum.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Switch {
+/// True.
+On,
+/// False.
+Off,
+/// Auto.
+Auto,
+}
+
+impl Default for Switch {
+fn default() -> Self {
+Switch::Auto
+}
+}
+
+impl str::FromStr for Switch {
+type Err = String;
+
+fn from_str(s: &str) -> Result<Self, Self::Err> {
+match s {
+"on" => Ok(Switch::On),
+"off" => Ok(Switch::Off),
+"auto" => Ok(Switch::Auto),
+other => Err(format!("Invalid switch value: {}", other))
+}
+}
+}
+
+pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> Result<bool, String> {
+match (user_defaults.is_first_launch, switch, user_defaults.tracing) {
+(false, Switch::On, false) => Err("TraceDB resync required".into()),
+(_, Switch::On, _) => Ok(true),
+(_, Switch::Off, _) => Ok(false),
+(_, Switch::Auto, def) => Ok(def),
+}
+}
+
 #[cfg(test)]
 mod tests {
 use util::journaldb::Algorithm;
-use super::{SpecType, Pruning, ResealPolicy};
+use user_defaults::UserDefaults;
+use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool};

 #[test]
 fn test_spec_type_parsing() {
@@ -274,4 +301,36 @@ mod tests {
 let all = ResealPolicy { own: true, external: true };
 assert_eq!(all, ResealPolicy::default());
 }
+
+#[test]
+fn test_switch_parsing() {
+assert_eq!(Switch::On, "on".parse().unwrap());
+assert_eq!(Switch::Off, "off".parse().unwrap());
+assert_eq!(Switch::Auto, "auto".parse().unwrap());
+}
+
+#[test]
+fn test_switch_default() {
+assert_eq!(Switch::default(), Switch::Auto);
+}
+
+fn user_defaults_with_tracing(first_launch: bool, tracing: bool) -> UserDefaults {
+let mut ud = UserDefaults::default();
+ud.is_first_launch = first_launch;
+ud.tracing = tracing;
+ud
+}
+
+#[test]
+fn test_switch_to_bool() {
+assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, true)).unwrap());
+assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, false)).unwrap());
+assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, true)).unwrap());
+assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, false)).unwrap());
+
+assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, true)).unwrap());
+assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, false)).unwrap());
+assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, true)).unwrap());
+assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err());
+}
 }
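The old Switch::turn_to logic is replaced by tracing_switch_to_bool, which reconciles the CLI switch with what the chain's user defaults recorded; a short sketch of its behaviour, matching the tests above:

// `auto` keeps whatever this chain was last run with.
assert_eq!(tracing_switch_to_bool(Switch::Auto, &user_defaults_with_tracing(false, true)).unwrap(), true);
// `on` over a database that never traced is refused instead of serving partial traces.
assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err());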
@@ -25,7 +25,7 @@ use ethcore::client::Client;
 use ethcore::account_provider::AccountProvider;
 use ethsync::{ManageNetwork, SyncProvider};
 use ethcore_rpc::{Extendable, NetworkSettings};
-pub use ethcore_rpc::ConfirmationsQueue;
+pub use ethcore_rpc::SignerService;
 
 
 #[derive(Debug, PartialEq, Clone, Eq, Hash)]
@@ -94,7 +94,7 @@ impl FromStr for ApiSet {
 
 pub struct Dependencies {
 	pub signer_port: Option<u16>,
-	pub signer_queue: Arc<ConfirmationsQueue>,
+	pub signer_service: Arc<SignerService>,
 	pub client: Arc<Client>,
 	pub sync: Arc<SyncProvider>,
 	pub net: Arc<ManageNetwork>,
@@ -173,7 +173,7 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
 			server.add_delegate(filter_client.to_delegate());
 
 			if deps.signer_port.is_some() {
-				server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate());
+				server.add_delegate(EthSigningQueueClient::new(&deps.signer_service, &deps.client, &deps.miner, &deps.secret_store).to_delegate());
 			} else {
 				server.add_delegate(EthSigningUnsafeClient::new(&deps.client, &deps.secret_store, &deps.miner).to_delegate());
 			}
@@ -182,11 +182,11 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
 			server.add_delegate(PersonalClient::new(&deps.secret_store, &deps.client, &deps.miner, deps.signer_port, deps.geth_compatibility).to_delegate());
 		},
 		Api::Signer => {
-			server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_queue).to_delegate());
+			server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_service).to_delegate());
 		},
 		Api::Ethcore => {
-			let queue = deps.signer_port.map(|_| deps.signer_queue.clone());
-			server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, &deps.sync, &deps.net_service, deps.logger.clone(), deps.settings.clone(), queue).to_delegate())
+			let signer = deps.signer_port.map(|_| deps.signer_service.clone());
+			server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, &deps.sync, &deps.net_service, deps.logger.clone(), deps.settings.clone(), signer).to_delegate())
 		},
 		Api::EthcoreSet => {
 			server.add_delegate(EthcoreSetClient::new(&deps.client, &deps.miner, &deps.net_service).to_delegate())
@@ -23,7 +23,7 @@ use ethcore_rpc::NetworkSettings;
 use ethsync::NetworkConfiguration;
 use util::{Colour, version, U256};
 use io::{MayPanic, ForwardPanic, PanicHandler};
-use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNotify};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, ChainNotify};
 use ethcore::service::ClientService;
 use ethcore::account_provider::AccountProvider;
 use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions};
@@ -35,10 +35,11 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration};
 use signer::SignerServer;
 use dapps::WebappServer;
 use io_handler::ClientIoHandler;
-use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras};
+use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool};
 use helpers::{to_client_config, execute_upgrades, passwords_from_files};
 use dir::Directories;
 use cache::CacheConfig;
+use user_defaults::UserDefaults;
 use dapps;
 use signer;
 use modules;
@@ -87,34 +88,45 @@ pub struct RunCmd {
 }
 
 pub fn execute(cmd: RunCmd) -> Result<(), String> {
-	// increase max number of open files
-	raise_fd_limit();
+	// set up panic handler
+	let panic_handler = PanicHandler::new_in_arc();
 
 	// set up logger
 	let logger = try!(setup_log(&cmd.logger_config));
 
-	// set up panic handler
-	let panic_handler = PanicHandler::new_in_arc();
+	// increase max number of open files
+	raise_fd_limit();
 
 	// create dirs used by parity
 	try!(cmd.dirs.create_dirs());
 
 	// load spec
 	let spec = try!(cmd.spec.spec());
-	let fork_name = spec.fork_name.clone();
 
 	// load genesis hash
 	let genesis_hash = spec.genesis_header().hash();
 
+	// database paths
+	let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+
+	// user defaults path
+	let user_defaults_path = db_dirs.user_defaults_path();
+
+	// load user defaults
+	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
 	// select pruning algorithm
-	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref());
+	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
 	// prepare client and snapshot paths.
-	let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm);
-	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, fork_name.as_ref());
+	let client_path = db_dirs.client_path(algorithm);
+	let snapshot_path = db_dirs.snapshot_path();
 
 	// execute upgrades
-	try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// run in daemon mode
 	if let Some(pid_file) = cmd.daemon {
@@ -152,16 +164,13 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	// create client config
 	let client_config = to_client_config(
 		&cmd.cache_config,
-		&cmd.dirs,
-		genesis_hash,
 		cmd.mode,
-		cmd.tracing,
-		cmd.pruning,
+		tracing,
 		cmd.compaction,
 		cmd.wal,
 		cmd.vm_type,
 		cmd.name,
-		fork_name.as_ref(),
+		algorithm,
 	);
 
 	// set up bootnodes
@@ -206,9 +215,10 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	}
 
 	// set up dependencies for rpc servers
+	let signer_path = cmd.signer_conf.signer_path.clone();
 	let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
 		signer_port: cmd.signer_port,
-		signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()),
+		signer_service: Arc::new(rpc_apis::SignerService::new(move || signer::new_token(signer_path.clone()))),
 		client: client.clone(),
 		sync: sync_provider.clone(),
 		net: manage_network.clone(),
@@ -287,6 +297,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 		url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port));
 	}
 
+	// save user defaults
+	user_defaults.pruning = algorithm;
+	user_defaults.tracing = tracing;
+	try!(user_defaults.save(&user_defaults_path));
+
 	// Handle exit
 	wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server);
 
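Read together with the `tracing_switch_to_bool` assertions at the top of this diff, the new startup path resolves the CLI tracing `Switch` against the persisted defaults before the client config is built, and writes the outcome back afterwards. A minimal sketch of that round trip, condensed from the hunks above (nothing here beyond what the diff shows):

	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
	// May fail, e.g. Switch::On against stored defaults that rule tracing out (see the tests above).
	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
	// ... build the client config with `tracing` and `algorithm` ...
	user_defaults.pruning = algorithm;
	user_defaults.tracing = tracing;
	try!(user_defaults.save(&user_defaults_path));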
@@ -90,7 +90,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result<SignerServer, Str
 
 	let start_result = {
 		let server = signer::ServerBuilder::new(
-			deps.apis.signer_queue.clone(),
+			deps.apis.signer_service.queue(),
 			codes_path(conf.signer_path),
 		);
 		if conf.skip_origin_validation {
@@ -25,14 +25,15 @@ use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService as SS};
 use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
 use ethcore::snapshot::service::Service as SnapshotService;
 use ethcore::service::ClientService;
-use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType};
 use ethcore::miner::Miner;
 use ethcore::ids::BlockID;
 
 use cache::CacheConfig;
-use params::{SpecType, Pruning};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
+use user_defaults::UserDefaults;
 use fdlimit;
 
 use io::PanicHandler;
@@ -129,23 +130,35 @@ impl SnapshotCommand {
 		// load genesis hash
 		let genesis_hash = spec.genesis_header().hash();
 
+		// database paths
+		let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone());
+
+		// user defaults path
+		let user_defaults_path = db_dirs.user_defaults_path();
+
+		// load user defaults
+		let user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+		// check if tracing is on
+		let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults));
+
 		// Setup logging
 		let _logger = setup_log(&self.logger_config);
 
 		fdlimit::raise_fd_limit();
 
 		// select pruning algorithm
-		let algorithm = self.pruning.to_algorithm(&self.dirs, genesis_hash, spec.fork_name.as_ref());
+		let algorithm = self.pruning.to_algorithm(&user_defaults);
 
 		// prepare client and snapshot paths.
-		let client_path = self.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-		let snapshot_path = self.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+		let client_path = db_dirs.client_path(algorithm);
+		let snapshot_path = db_dirs.snapshot_path();
 
 		// execute upgrades
-		try!(execute_upgrades(&self.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, self.compaction.compaction_profile()));
+		try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile()));
 
 		// prepare client config
-		let client_config = to_client_config(&self.cache_config, &self.dirs, genesis_hash, self.mode, self.tracing, self.pruning, self.compaction, self.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
+		let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm);
 
 		let service = try!(ClientService::start(
 			client_config,
parity/user_defaults.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+use std::collections::BTreeMap;
+use serde::{Serialize, Serializer, Error, Deserialize, Deserializer};
+use serde::de::{Visitor, MapVisitor};
+use serde::de::impls::BTreeMapVisitor;
+use serde_json::Value;
+use serde_json::de::from_reader;
+use serde_json::ser::to_string;
+use util::journaldb::Algorithm;
+
+pub struct UserDefaults {
+	pub is_first_launch: bool,
+	pub pruning: Algorithm,
+	pub tracing: bool,
+}
+
+impl Serialize for UserDefaults {
+	fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
+	where S: Serializer {
+		let mut map: BTreeMap<String, Value> = BTreeMap::new();
+		map.insert("pruning".into(), Value::String(self.pruning.as_str().into()));
+		map.insert("tracing".into(), Value::Bool(self.tracing));
+		map.serialize(serializer)
+	}
+}
+
+struct UserDefaultsVisitor;
+
+impl Deserialize for UserDefaults {
+	fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
+	where D: Deserializer {
+		deserializer.deserialize(UserDefaultsVisitor)
+	}
+}
+
+impl Visitor for UserDefaultsVisitor {
+	type Value = UserDefaults;
+
+	fn visit_map<V>(&mut self, visitor: V) -> Result<Self::Value, V::Error>
+	where V: MapVisitor {
+		let mut map: BTreeMap<String, Value> = try!(BTreeMapVisitor::new().visit_map(visitor));
+		let pruning: Value = try!(map.remove("pruning".into()).ok_or_else(|| Error::custom("missing pruning")));
+		let pruning = try!(pruning.as_str().ok_or_else(|| Error::custom("invalid pruning value")));
+		let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method")));
+		let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing")));
+		let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value")));
+
+		let user_defaults = UserDefaults {
+			is_first_launch: false,
+			pruning: pruning,
+			tracing: tracing,
+		};
+
+		Ok(user_defaults)
+	}
+}
+
+impl Default for UserDefaults {
+	fn default() -> Self {
+		UserDefaults {
+			is_first_launch: true,
+			pruning: Algorithm::default(),
+			tracing: false,
+		}
+	}
+}
+
+impl UserDefaults {
+	pub fn load<P>(path: P) -> Result<Self, String> where P: AsRef<Path> {
+		match File::open(path) {
+			Ok(file) => from_reader(file).map_err(|e| e.to_string()),
+			_ => Ok(UserDefaults::default()),
+		}
+	}
+
+	pub fn save<P>(self, path: P) -> Result<(), String> where P: AsRef<Path> {
+		let mut file: File = try!(File::create(path).map_err(|_| "Cannot create user defaults file".to_owned()));
+		file.write_all(to_string(&self).unwrap().as_bytes()).map_err(|_| "Failed to save user defaults".to_owned())
+	}
+}
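For orientation, the `Serialize` impl above writes a flat JSON object with `pruning` and `tracing` keys; `is_first_launch` is never persisted and is forced to `false` on deserialization. A hedged round-trip sketch (the concrete pruning string depends on `Algorithm::as_str()`, so "archive" below is only an assumed example):

	let defaults = UserDefaults { is_first_launch: false, pruning: Algorithm::default(), tracing: false };
	try!(defaults.save(&user_defaults_path));
	// resulting file contents, e.g.: {"pruning":"archive","tracing":false}
	let reloaded = try!(UserDefaults::load(&user_defaults_path));
	assert!(!reloaded.is_first_launch); // loaded defaults are never treated as a first launch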
@@ -17,6 +17,7 @@ jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc-http-server.gi
 ethcore-io = { path = "../util/io" }
 ethcore-util = { path = "../util" }
 ethcore = { path = "../ethcore" }
+ethcrypto = { path = "../ethcrypto" }
 ethkey = { path = "../ethkey" }
 ethstore = { path = "../ethstore" }
 ethash = { path = "../ethash" }
@@ -28,6 +28,7 @@ extern crate jsonrpc_http_server;
 extern crate ethcore_io as io;
 extern crate ethcore;
 extern crate ethkey;
+extern crate ethcrypto as crypto;
 extern crate ethstore;
 extern crate ethsync;
 extern crate transient_hashmap;
@@ -53,7 +54,7 @@ use self::jsonrpc_core::{IoHandler, IoDelegate};
 
 pub use jsonrpc_http_server::{ServerBuilder, Server, RpcServerError};
 pub mod v1;
-pub use v1::{SigningQueue, ConfirmationsQueue, NetworkSettings};
+pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings};
 
 /// An object that can be extended with `IoDelegates`
 pub trait Extendable {
rpc/src/v1/helpers/auto_args.rs (new file, 171 lines)
@@ -0,0 +1,171 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//! Automatically serialize and deserialize parameters around a strongly-typed function.
|
||||||
|
|
||||||
|
// because we reuse the type names as idents in the macros as a dirty hack to
|
||||||
|
// work around `concat_idents!` being unstable.
|
||||||
|
#![allow(non_snake_case)]
|
||||||
|
|
||||||
|
use super::errors;
|
||||||
|
|
||||||
|
use jsonrpc_core::{Error, Params, Value, from_params, to_value};
|
||||||
|
use serde::{Serialize, Deserialize};
|
||||||
|
|
||||||
|
/// Auto-generates an RPC trait from trait definition.
|
||||||
|
///
|
||||||
|
/// This just copies out all the methods, docs, and adds another
|
||||||
|
/// function `to_delegate` which will automatically wrap each strongly-typed
|
||||||
|
/// function in a wrapper which handles parameter and output type serialization.
|
||||||
|
///
|
||||||
|
/// Every function must have a `#[name("rpc_nameHere")]` attribute after
|
||||||
|
/// its documentation, and no other attributes. All function names are
|
||||||
|
/// allowed except for `to_delegate`, which is auto-generated.
|
||||||
|
macro_rules! build_rpc_trait {
|
||||||
|
(
|
||||||
|
$(#[$t_attr: meta])*
|
||||||
|
pub trait $name: ident {
|
||||||
|
$(
|
||||||
|
$(#[doc=$m_doc: expr])* #[name($rpc_name: expr)]
|
||||||
|
fn $method: ident (&self $(, $param: ty)*) -> $out: ty;
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
) => {
|
||||||
|
$(#[$t_attr])*
|
||||||
|
pub trait $name: Sized + Send + Sync + 'static {
|
||||||
|
$(
|
||||||
|
$(#[doc=$m_doc])*
|
||||||
|
fn $method(&self $(, $param)*) -> $out;
|
||||||
|
)*
|
||||||
|
|
||||||
|
/// Transform this into an `IoDelegate`, automatically wrapping
|
||||||
|
/// the parameters.
|
||||||
|
fn to_delegate(self) -> ::jsonrpc_core::IoDelegate<Self> {
|
||||||
|
let mut del = ::jsonrpc_core::IoDelegate::new(self.into());
|
||||||
|
$(
|
||||||
|
del.add_method($rpc_name, move |base, params| {
|
||||||
|
($name::$method as fn(&_ $(, $param)*) -> $out).wrap_rpc(base, params)
|
||||||
|
});
|
||||||
|
)*
|
||||||
|
del
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A wrapper type without an implementation of `Deserialize`
|
||||||
|
/// which allows a special implementation of `Wrap` for functions
|
||||||
|
/// that take a trailing default parameter.
|
||||||
|
pub struct Trailing<T: Default + Deserialize>(pub T);
|
||||||
|
|
||||||
|
/// Wrapper trait for synchronous RPC functions.
|
||||||
|
pub trait Wrap<B: Send + Sync + 'static> {
|
||||||
|
fn wrap_rpc(&self, base: &B, params: Params) -> Result<Value, Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
// special impl for no parameters.
|
||||||
|
impl<B, OUT> Wrap<B> for fn(&B) -> Result<OUT, Error>
|
||||||
|
where B: Send + Sync + 'static, OUT: Serialize
|
||||||
|
{
|
||||||
|
fn wrap_rpc(&self, base: &B, params: Params) -> Result<Value, Error> {
|
||||||
|
::v1::helpers::params::expect_no_params(params)
|
||||||
|
.and_then(|()| (self)(base))
|
||||||
|
.map(to_value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// creates a wrapper implementation which deserializes the parameters,
|
||||||
|
// calls the function with concrete type, and serializes the output.
|
||||||
|
macro_rules! wrap {
|
||||||
|
($($x: ident),+) => {
|
||||||
|
impl <
|
||||||
|
BASE: Send + Sync + 'static,
|
||||||
|
OUT: Serialize,
|
||||||
|
$($x: Deserialize,)+
|
||||||
|
> Wrap<BASE> for fn(&BASE, $($x,)+) -> Result<OUT, Error> {
|
||||||
|
fn wrap_rpc(&self, base: &BASE, params: Params) -> Result<Value, Error> {
|
||||||
|
from_params::<($($x,)+)>(params).and_then(|($($x,)+)| {
|
||||||
|
(self)(base, $($x,)+)
|
||||||
|
}).map(to_value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// special impl for no parameters other than block parameter.
|
||||||
|
impl<B, OUT, T> Wrap<B> for fn(&B, Trailing<T>) -> Result<OUT, Error>
|
||||||
|
where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize
|
||||||
|
{
|
||||||
|
fn wrap_rpc(&self, base: &B, params: Params) -> Result<Value, Error> {
|
||||||
|
let len = match params {
|
||||||
|
Params::Array(ref v) => v.len(),
|
||||||
|
Params::None => 0,
|
||||||
|
_ => return Err(errors::invalid_params("not an array", "")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let (id,) = match len {
|
||||||
|
0 => (T::default(),),
|
||||||
|
1 => try!(from_params::<(T,)>(params)),
|
||||||
|
_ => return Err(Error::invalid_params()),
|
||||||
|
};
|
||||||
|
|
||||||
|
(self)(base, Trailing(id)).map(to_value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// similar to `wrap!`, but handles a single default trailing parameter
|
||||||
|
// accepts an additional argument indicating the number of non-trailing parameters.
|
||||||
|
macro_rules! wrap_with_trailing {
|
||||||
|
($num: expr, $($x: ident),+) => {
|
||||||
|
impl <
|
||||||
|
BASE: Send + Sync + 'static,
|
||||||
|
OUT: Serialize,
|
||||||
|
$($x: Deserialize,)+
|
||||||
|
TRAILING: Default + Deserialize,
|
||||||
|
> Wrap<BASE> for fn(&BASE, $($x,)+ Trailing<TRAILING>) -> Result<OUT, Error> {
|
||||||
|
fn wrap_rpc(&self, base: &BASE, params: Params) -> Result<Value, Error> {
|
||||||
|
let len = match params {
|
||||||
|
Params::Array(ref v) => v.len(),
|
||||||
|
Params::None => 0,
|
||||||
|
_ => return Err(errors::invalid_params("not an array", "")),
|
||||||
|
};
|
||||||
|
|
||||||
|
let params = match len - $num {
|
||||||
|
0 => from_params::<($($x,)+)>(params)
|
||||||
|
.map(|($($x,)+)| ($($x,)+ TRAILING::default())),
|
||||||
|
1 => from_params::<($($x,)+ TRAILING)>(params)
|
||||||
|
.map(|($($x,)+ id)| ($($x,)+ id)),
|
||||||
|
_ => Err(Error::invalid_params()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let ($($x,)+ id) = try!(params);
|
||||||
|
(self)(base, $($x,)+ Trailing(id)).map(to_value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wrap!(A, B, C, D, E);
|
||||||
|
wrap!(A, B, C, D);
|
||||||
|
wrap!(A, B, C);
|
||||||
|
wrap!(A, B);
|
||||||
|
wrap!(A);
|
||||||
|
|
||||||
|
wrap_with_trailing!(5, A, B, C, D, E);
|
||||||
|
wrap_with_trailing!(4, A, B, C, D);
|
||||||
|
wrap_with_trailing!(3, A, B, C);
|
||||||
|
wrap_with_trailing!(2, A, B);
|
||||||
|
wrap_with_trailing!(1, A);
|
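The `auto_args` helpers in this new file generate the `to_delegate` plumbing from a trait definition, so RPC methods can be written with concrete argument and return types. A minimal sketch of the intended usage with invented names (`Dummy`, `dummy_*`); the real conversions appear in the `eth` hunks further down:

	build_rpc_trait! {
		/// Illustrative RPC trait (hypothetical).
		pub trait Dummy {
			/// Current block number.
			#[name("dummy_blockNumber")]
			fn block_number(&self) -> Result<RpcU256, Error>;

			/// Balance of an account; the trailing BlockNumber defaults when omitted.
			#[name("dummy_balance")]
			fn balance(&self, RpcH160, Trailing<BlockNumber>) -> Result<RpcU256, Error>;
		}
	}

An implementing struct is then registered through `server.add_delegate(client.to_delegate())`, exactly as `setup_rpc` does above.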
@@ -41,7 +41,7 @@ fn prepare_transaction<C, M>(client: &C, miner: &M, request: TransactionRequest)
 	}
 }
 
-pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result<Value, Error>
+pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result<RpcH256, Error>
 	where C: MiningBlockChainClient, M: MinerService {
 	let hash = RpcH256::from(signed_transaction.hash());
 
@@ -49,7 +49,7 @@ pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: Sig
 
 	import
 		.map_err(errors::from_transaction_error)
-		.map(|_| to_value(&hash))
+		.map(|_| hash)
 }
 
 pub fn signature_with_password(accounts: &AccountProvider, address: Address, hash: H256, pass: String) -> Result<Value, Error> {
@@ -70,7 +70,7 @@ pub fn unlock_sign_and_dispatch<C, M>(client: &C, miner: &M, request: Transactio
 	};
 
 	trace!(target: "miner", "send_transaction: dispatching tx: {}", ::rlp::encode(&signed_transaction).to_vec().pretty());
-	dispatch_transaction(&*client, &*miner, signed_transaction)
+	dispatch_transaction(&*client, &*miner, signed_transaction).map(to_value)
 }
 
 pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, request: TransactionRequest, account_provider: &AccountProvider, address: Address) -> Result<Value, Error>
@@ -84,7 +84,7 @@ pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, request: TransactionReques
 	};
 
 	trace!(target: "miner", "send_transaction: dispatching tx: {}", ::rlp::encode(&signed_transaction).to_vec().pretty());
-	dispatch_transaction(&*client, &*miner, signed_transaction)
+	dispatch_transaction(&*client, &*miner, signed_transaction).map(to_value)
 }
 
 pub fn default_gas_price<C, M>(client: &C, miner: &M) -> U256 where C: MiningBlockChainClient, M: MinerService {
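Since `dispatch_transaction` now returns the typed `RpcH256`, strongly-typed callers can hand the hash straight back, while the remaining `Value`-based entry points serialize only at the boundary, which is what the two `.map(to_value)` call sites above do. A hedged sketch of such a boundary (the surrounding signature is an assumption, not code from this diff):

	fn send_value_based<C, M>(client: &C, miner: &M, tx: SignedTransaction) -> Result<Value, Error>
		where C: MiningBlockChainClient, M: MinerService {
		// typed hash out of dispatch_transaction, serialized only here
		dispatch_transaction(client, miner, tx).map(to_value)
	}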
@@ -139,6 +139,13 @@ pub fn no_author() -> Error {
 	}
 }
 
+pub fn token(e: String) -> Error {
+	Error {
+		code: ErrorCode::ServerError(codes::UNKNOWN_ERROR),
+		message: "There was an error when saving your authorization tokens.".into(),
+		data: Some(Value::String(e)),
+	}
+}
+
 pub fn signer_disabled() -> Error {
 	Error {
@@ -14,18 +14,25 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+#[macro_use]
+pub mod auto_args;
+
 #[macro_use]
 pub mod errors;
 
 pub mod dispatch;
 pub mod params;
 
 mod poll_manager;
 mod poll_filter;
 mod requests;
+mod signer;
 mod signing_queue;
 mod network_settings;
 
 pub use self::poll_manager::PollManager;
-pub use self::poll_filter::PollFilter;
+pub use self::poll_filter::{PollFilter, limit_logs};
 pub use self::requests::{TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest};
 pub use self::signing_queue::{ConfirmationsQueue, ConfirmationPromise, ConfirmationResult, SigningQueue, QueueEvent};
+pub use self::signer::SignerService;
 pub use self::network_settings::NetworkSettings;
@@ -36,14 +36,6 @@ pub fn params_len(params: &Params) -> usize {
 	}
 }
 
-/// Deserialize request parameters with optional second parameter `BlockNumber` defaulting to `BlockNumber::Latest`.
-pub fn from_params_default_second<F>(params: Params) -> Result<(F, BlockNumber, ), Error> where F: serde::de::Deserialize {
-	match params_len(&params) {
-		1 => from_params::<(F, )>(params).map(|(f,)| (f, BlockNumber::Latest)),
-		_ => from_params::<(F, BlockNumber)>(params),
-	}
-}
-
 /// Deserialize request parameters with optional third parameter `BlockNumber` defaulting to `BlockNumber::Latest`.
 pub fn from_params_default_third<F1, F2>(params: Params) -> Result<(F1, F2, BlockNumber, ), Error> where F1: serde::de::Deserialize, F2: serde::de::Deserialize {
 	match params_len(&params) {
|
|||||||
Block(BlockNumber),
|
Block(BlockNumber),
|
||||||
/// Hashes of all transactions which client was notified about.
|
/// Hashes of all transactions which client was notified about.
|
||||||
PendingTransaction(Vec<H256>),
|
PendingTransaction(Vec<H256>),
|
||||||
/// Number of From block number, pending logs and log filter iself.
|
/// Number of From block number, pending logs and log filter itself.
|
||||||
Logs(BlockNumber, HashSet<Log>, Filter)
|
Logs(BlockNumber, HashSet<Log>, Filter)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns only last `n` logs
|
||||||
|
pub fn limit_logs(mut logs: Vec<Log>, limit: Option<usize>) -> Vec<Log> {
|
||||||
|
let len = logs.len();
|
||||||
|
match limit {
|
||||||
|
Some(limit) if len >= limit => logs.split_off(len - limit),
|
||||||
|
_ => logs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
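A quick illustration of the `limit_logs` semantics implemented above: when a limit is given it keeps the newest entries, i.e. the tail of the vector.

	// `older`, `newer`, `newest` are hypothetical Log values.
	let trimmed = limit_logs(vec![older, newer, newest], Some(2));
	assert_eq!(trimmed.len(), 2); // only the last two entries survive; None leaves the vector untouched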
rpc/src/v1/helpers/signer.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+use std::ops::Deref;
+use v1::helpers::signing_queue::{ConfirmationsQueue};
+
+/// Manages communication with Signer crate
+pub struct SignerService {
+	queue: Arc<ConfirmationsQueue>,
+	generate_new_token: Box<Fn() -> Result<String, String> + Send + Sync + 'static>,
+}
+
+impl SignerService {
+
+	/// Creates new Signer Service given function to generate new tokens.
+	pub fn new<F>(new_token: F) -> Self
+		where F: Fn() -> Result<String, String> + Send + Sync + 'static {
+		SignerService {
+			queue: Arc::new(ConfirmationsQueue::default()),
+			generate_new_token: Box::new(new_token),
+		}
+	}
+
+	/// Generates new token.
+	pub fn generate_token(&self) -> Result<String, String> {
+		(self.generate_new_token)()
+	}
+
+	/// Returns a reference to `ConfirmationsQueue`
+	pub fn queue(&self) -> Arc<ConfirmationsQueue> {
+		self.queue.clone()
+	}
+
+	#[cfg(test)]
+	/// Creates new Signer Service for tests.
+	pub fn new_test() -> Self {
+		SignerService::new(|| Ok("new_token".into()))
+	}
+}
+
+impl Deref for SignerService {
+	type Target = ConfirmationsQueue;
+	fn deref(&self) -> &Self::Target {
+		&self.queue
+	}
+}
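`SignerService` is what `parity/run.rs` now places into `rpc_apis::Dependencies`: it owns the confirmation queue and a token generator. A small usage sketch mirroring the hunks above (the `signer_path` binding comes from `run.rs`):

	let signer_service = Arc::new(SignerService::new(move || signer::new_token(signer_path.clone())));
	let queue: Arc<ConfirmationsQueue> = signer_service.queue();          // shared with the signer server
	let token: Result<String, String> = signer_service.generate_token();  // a freshly persisted auth token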
@@ -42,10 +42,14 @@ use ethcore::log_entry::LogEntry;
 use ethcore::filter::Filter as EthcoreFilter;
 use self::ethash::SeedHashCompute;
 use v1::traits::Eth;
-use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256};
-use v1::helpers::{CallRequest as CRequest, errors};
+use v1::types::{
+	Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo,
+	Transaction, CallRequest, Index, Filter, Log, Receipt, Work,
+	H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256,
+};
+use v1::helpers::{CallRequest as CRequest, errors, limit_logs};
 use v1::helpers::dispatch::{default_gas_price, dispatch_transaction};
-use v1::helpers::params::{expect_no_params, params_len, from_params_default_second, from_params_default_third};
+use v1::helpers::auto_args::Trailing;
 
 /// Eth RPC options
 pub struct EthClientOptions {
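The hunks that follow convert every handler from the old `fn foo(&self, params: Params) -> Result<Value, Error>` shape to a typed signature that the `auto_args` wrappers can serialize. The pattern, taken directly from the `block_number` handler below:

	// Before: manual Params handling.
	fn block_number(&self, params: Params) -> Result<Value, Error> {
		try!(self.active());
		try!(expect_no_params(params));
		Ok(to_value(&RpcU256::from(take_weak!(self.client).chain_info().best_block_number)))
	}

	// After: typed signature; parameter and result serialization live in auto_args.
	fn block_number(&self) -> Result<RpcU256, Error> {
		try!(self.active());
		Ok(RpcU256::from(take_weak!(self.client).chain_info().best_block_number))
	}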
@ -100,7 +104,7 @@ impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block(&self, id: BlockID, include_txs: bool) -> Result<Value, Error> {
|
fn block(&self, id: BlockID, include_txs: bool) -> Result<Option<Block>, Error> {
|
||||||
let client = take_weak!(self.client);
|
let client = take_weak!(self.client);
|
||||||
match (client.block(id.clone()), client.block_total_difficulty(id)) {
|
match (client.block(id.clone()), client.block_total_difficulty(id)) {
|
||||||
(Some(bytes), Some(total_difficulty)) => {
|
(Some(bytes), Some(total_difficulty)) => {
|
||||||
@ -131,28 +135,28 @@ impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
|
|||||||
},
|
},
|
||||||
extra_data: Bytes::new(view.extra_data())
|
extra_data: Bytes::new(view.extra_data())
|
||||||
};
|
};
|
||||||
Ok(to_value(&block))
|
Ok(Some(block))
|
||||||
},
|
},
|
||||||
_ => Ok(Value::Null)
|
_ => Ok(None)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn transaction(&self, id: TransactionID) -> Result<Value, Error> {
|
fn transaction(&self, id: TransactionID) -> Result<Option<Transaction>, Error> {
|
||||||
match take_weak!(self.client).transaction(id) {
|
match take_weak!(self.client).transaction(id) {
|
||||||
Some(t) => Ok(to_value(&Transaction::from(t))),
|
Some(t) => Ok(Some(Transaction::from(t))),
|
||||||
None => Ok(Value::Null)
|
None => Ok(None),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn uncle(&self, id: UncleID) -> Result<Value, Error> {
|
fn uncle(&self, id: UncleID) -> Result<Option<Block>, Error> {
|
||||||
let client = take_weak!(self.client);
|
let client = take_weak!(self.client);
|
||||||
let uncle: BlockHeader = match client.uncle(id) {
|
let uncle: BlockHeader = match client.uncle(id) {
|
||||||
Some(rlp) => rlp::decode(&rlp),
|
Some(rlp) => rlp::decode(&rlp),
|
||||||
None => { return Ok(Value::Null); }
|
None => { return Ok(None); }
|
||||||
};
|
};
|
||||||
let parent_difficulty = match client.block_total_difficulty(BlockID::Hash(uncle.parent_hash().clone())) {
|
let parent_difficulty = match client.block_total_difficulty(BlockID::Hash(uncle.parent_hash().clone())) {
|
||||||
Some(difficulty) => difficulty,
|
Some(difficulty) => difficulty,
|
||||||
None => { return Ok(Value::Null); }
|
None => { return Ok(None); }
|
||||||
};
|
};
|
||||||
|
|
||||||
let block = Block {
|
let block = Block {
|
||||||
@ -177,7 +181,7 @@ impl<C, S: ?Sized, M, EM> EthClient<C, S, M, EM> where
|
|||||||
uncles: vec![],
|
uncles: vec![],
|
||||||
transactions: BlockTransactions::Hashes(vec![]),
|
transactions: BlockTransactions::Hashes(vec![]),
|
||||||
};
|
};
|
||||||
Ok(to_value(&block))
|
Ok(Some(block))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sign_call(&self, request: CRequest) -> Result<SignedTransaction, Error> {
|
fn sign_call(&self, request: CRequest) -> Result<SignedTransaction, Error> {
|
||||||
@ -240,20 +244,19 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
M: MinerService + 'static,
|
M: MinerService + 'static,
|
||||||
EM: ExternalMinerService + 'static {
|
EM: ExternalMinerService + 'static {
|
||||||
|
|
||||||
fn protocol_version(&self, params: Params) -> Result<Value, Error> {
|
fn protocol_version(&self) -> Result<String, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned()))
|
let version = take_weak!(self.sync).status().protocol_version.to_owned();
|
||||||
|
Ok(format!("{}", version))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn syncing(&self, params: Params) -> Result<Value, Error> {
|
fn syncing(&self) -> Result<SyncStatus, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
let status = take_weak!(self.sync).status();
|
let status = take_weak!(self.sync).status();
|
||||||
let res = match status.state {
|
match status.state {
|
||||||
SyncState::Idle => SyncStatus::None,
|
SyncState::Idle => Ok(SyncStatus::None),
|
||||||
SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead
|
SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead
|
||||||
| SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => {
|
| SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => {
|
||||||
let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number);
|
let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number);
|
||||||
@ -265,271 +268,242 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
current_block: current_block.into(),
|
current_block: current_block.into(),
|
||||||
highest_block: highest_block.into(),
|
highest_block: highest_block.into(),
|
||||||
};
|
};
|
||||||
SyncStatus::Info(info)
|
Ok(SyncStatus::Info(info))
|
||||||
} else {
|
} else {
|
||||||
SyncStatus::None
|
Ok(SyncStatus::None)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
}
|
||||||
Ok(to_value(&res))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn author(&self, params: Params) -> Result<Value, Error> {
|
fn author(&self) -> Result<RpcH160, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
Ok(to_value(&RpcH160::from(take_weak!(self.miner).author())))
|
Ok(RpcH160::from(take_weak!(self.miner).author()))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn is_mining(&self, params: Params) -> Result<Value, Error> {
|
fn is_mining(&self) -> Result<bool, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
Ok(to_value(&(take_weak!(self.miner).is_sealing())))
|
Ok(take_weak!(self.miner).is_sealing())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn hashrate(&self, params: Params) -> Result<Value, Error> {
|
fn hashrate(&self) -> Result<RpcU256, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
Ok(to_value(&RpcU256::from(self.external_miner.hashrate())))
|
Ok(RpcU256::from(self.external_miner.hashrate()))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn gas_price(&self, params: Params) -> Result<Value, Error> {
|
fn gas_price(&self) -> Result<RpcU256, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
|
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
|
||||||
Ok(to_value(&RpcU256::from(default_gas_price(&*client, &*miner))))
|
Ok(RpcU256::from(default_gas_price(&*client, &*miner)))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn accounts(&self, params: Params) -> Result<Value, Error> {
|
fn accounts(&self) -> Result<Vec<RpcH160>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
let store = take_weak!(self.accounts);
|
let store = take_weak!(self.accounts);
|
||||||
let accounts = try!(store.accounts().map_err(|e| errors::internal("Could not fetch accounts.", e)));
|
let accounts = try!(store.accounts().map_err(|e| errors::internal("Could not fetch accounts.", e)));
|
||||||
Ok(to_value(&accounts.into_iter().map(Into::into).collect::<Vec<RpcH160>>()))
|
Ok(accounts.into_iter().map(Into::into).collect())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_number(&self) -> Result<RpcU256, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
try!(expect_no_params(params));
|
|
||||||
|
|
||||||
Ok(to_value(&RpcU256::from(take_weak!(self.client).chain_info().best_block_number)))
|
Ok(RpcU256::from(take_weak!(self.client).chain_info().best_block_number))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn balance(&self, params: Params) -> Result<Value, Error> {
|
fn balance(&self, address: RpcH160, num: Trailing<BlockNumber>) -> Result<RpcU256, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params_default_second(params)
|
|
||||||
.and_then(|(address, block_number,)| {
|
let address = address.into();
|
||||||
let address: Address = RpcH160::into(address);
|
match num.0 {
|
||||||
match block_number {
|
BlockNumber::Pending => Ok(take_weak!(self.miner).balance(&*take_weak!(self.client), &address).into()),
|
||||||
BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).balance(&*take_weak!(self.client), &address)))),
|
id => match take_weak!(self.client).balance(&address, id.into()) {
|
||||||
id => match take_weak!(self.client).balance(&address, id.into()) {
|
Some(balance) => Ok(balance.into()),
|
||||||
Some(balance) => Ok(to_value(&RpcU256::from(balance))),
|
None => Err(errors::state_pruned()),
|
||||||
None => Err(errors::state_pruned()),
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn storage_at(&self, params: Params) -> Result<Value, Error> {
|
fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing<BlockNumber>) -> Result<RpcH256, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params_default_third::<RpcH160, RpcU256>(params)
|
let address: Address = RpcH160::into(address);
|
||||||
.and_then(|(address, position, block_number,)| {
|
let position: U256 = RpcU256::into(pos);
|
||||||
let address: Address = RpcH160::into(address);
|
match num.0 {
|
||||||
let position: U256 = RpcU256::into(position);
|
BlockNumber::Pending => Ok(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position)).into()),
|
||||||
match block_number {
|
id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) {
|
||||||
BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position))))),
|
Some(s) => Ok(s.into()),
|
||||||
id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) {
|
None => Err(errors::state_pruned()),
|
||||||
Some(s) => Ok(to_value(&RpcH256::from(s))),
|
}
|
||||||
None => Err(errors::state_pruned()),
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn transaction_count(&self, params: Params) -> Result<Value, Error> {
|
fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> Result<RpcU256, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params_default_second(params)
|
|
||||||
.and_then(|(address, block_number,)| {
|
let address: Address = RpcH160::into(address);
|
||||||
let address: Address = RpcH160::into(address);
|
match num.0 {
|
||||||
match block_number {
|
BlockNumber::Pending => Ok(take_weak!(self.miner).nonce(&*take_weak!(self.client), &address).into()),
|
||||||
BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).nonce(&*take_weak!(self.client), &address)))),
|
id => match take_weak!(self.client).nonce(&address, id.into()) {
|
||||||
id => match take_weak!(self.client).nonce(&address, id.into()) {
|
Some(nonce) => Ok(nonce.into()),
|
||||||
Some(nonce) => Ok(to_value(&RpcU256::from(nonce))),
|
None => Err(errors::state_pruned()),
|
||||||
None => Err(errors::state_pruned()),
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_transaction_count_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn block_transaction_count_by_hash(&self, hash: RpcH256) -> Result<Option<RpcU256>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(RpcH256,)>(params)
|
Ok(
|
||||||
.and_then(|(hash,)| // match
|
take_weak!(self.client).block(BlockID::Hash(hash.into()))
|
||||||
take_weak!(self.client).block(BlockID::Hash(hash.into()))
|
.map(|bytes| BlockView::new(&bytes).transactions_count().into())
|
||||||
.map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count())))))
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_transaction_count_by_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_transaction_count_by_number(&self, num: BlockNumber) -> Result<Option<RpcU256>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(BlockNumber,)>(params)
|
|
||||||
.and_then(|(block_number,)| match block_number {
|
match num {
|
||||||
BlockNumber::Pending => Ok(to_value(
|
BlockNumber::Pending => Ok(Some(
|
||||||
&RpcU256::from(take_weak!(self.miner).status().transactions_in_pending_block)
|
take_weak!(self.miner).status().transactions_in_pending_block.into()
|
||||||
)),
|
)),
|
||||||
_ => take_weak!(self.client).block(block_number.into())
|
_ => Ok(
|
||||||
.map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count()))))
|
take_weak!(self.client).block(num.into())
|
||||||
})
|
.map(|bytes| BlockView::new(&bytes).transactions_count().into())
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_uncles_count_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn block_uncles_count_by_hash(&self, hash: RpcH256) -> Result<Option<RpcU256>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(RpcH256,)>(params)
|
|
||||||
.and_then(|(hash,)|
|
Ok(
|
||||||
take_weak!(self.client).block(BlockID::Hash(hash.into()))
|
take_weak!(self.client).block(BlockID::Hash(hash.into()))
|
||||||
.map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count())))))
|
.map(|bytes| BlockView::new(&bytes).uncles_count().into())
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_uncles_count_by_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_uncles_count_by_number(&self, num: BlockNumber) -> Result<Option<RpcU256>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(BlockNumber,)>(params)
|
|
||||||
.and_then(|(block_number,)| match block_number {
|
match num {
|
||||||
BlockNumber::Pending => Ok(to_value(&RpcU256::from(0))),
|
BlockNumber::Pending => Ok(Some(0.into())),
|
||||||
_ => take_weak!(self.client).block(block_number.into())
|
_ => Ok(
|
||||||
.map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count()))))
|
take_weak!(self.client).block(num.into())
|
||||||
})
|
.map(|bytes| BlockView::new(&bytes).uncles_count().into())
|
||||||
|
),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn code_at(&self, params: Params) -> Result<Value, Error> {
|
fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> Result<Bytes, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params_default_second(params)
|
|
||||||
.and_then(|(address, block_number,)| {
|
let address: Address = RpcH160::into(address);
|
||||||
let address: Address = RpcH160::into(address);
|
match num.0 {
|
||||||
match block_number {
|
BlockNumber::Pending => Ok(take_weak!(self.miner).code(&*take_weak!(self.client), &address).map_or_else(Bytes::default, Bytes::new)),
|
||||||
BlockNumber::Pending => Ok(to_value(&take_weak!(self.miner).code(&*take_weak!(self.client), &address).map_or_else(Bytes::default, Bytes::new))),
|
_ => match take_weak!(self.client).code(&address, num.0.into()) {
|
||||||
_ => match take_weak!(self.client).code(&address, block_number.into()) {
|
Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)),
|
||||||
Some(code) => Ok(to_value(&code.map_or_else(Bytes::default, Bytes::new))),
|
None => Err(errors::state_pruned()),
|
||||||
None => Err(errors::state_pruned()),
|
},
|
||||||
},
|
}
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> Result<Option<Block>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(RpcH256, bool)>(params)
|
|
||||||
.and_then(|(hash, include_txs)| self.block(BlockID::Hash(hash.into()), include_txs))
|
self.block(BlockID::Hash(hash.into()), include_txs)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_by_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> Result<Option<Block>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(BlockNumber, bool)>(params)
|
|
||||||
.and_then(|(number, include_txs)| self.block(number.into(), include_txs))
|
self.block(num.into(), include_txs)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn transaction_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn transaction_by_hash(&self, hash: RpcH256) -> Result<Option<Transaction>, Error> {
|
||||||
try!(self.active());
|
try!(self.active());
|
||||||
from_params::<(RpcH256,)>(params)
|
|
||||||
.and_then(|(hash,)| {
|
let miner = take_weak!(self.miner);
|
||||||
let miner = take_weak!(self.miner);
|
let hash: H256 = hash.into();
|
||||||
let hash: H256 = hash.into();
|
match miner.transaction(&hash) {
|
||||||
match miner.transaction(&hash) {
|
Some(pending_tx) => Ok(Some(pending_tx.into())),
|
||||||
Some(pending_tx) => Ok(to_value(&Transaction::from(pending_tx))),
|
None => self.transaction(TransactionID::Hash(hash))
|
||||||
None => self.transaction(TransactionID::Hash(hash))
|
}
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn transaction_by_block_hash_and_index(&self, params: Params) -> Result<Value, Error> {
|
+	fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result<Option<Transaction>, Error> {
 		try!(self.active());
-		from_params::<(RpcH256, Index)>(params)
-			.and_then(|(hash, index)| self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value())))
+		self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value()))
 	}

-	fn transaction_by_block_number_and_index(&self, params: Params) -> Result<Value, Error> {
+	fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result<Option<Transaction>, Error> {
 		try!(self.active());
-		from_params::<(BlockNumber, Index)>(params)
-			.and_then(|(number, index)| self.transaction(TransactionID::Location(number.into(), index.value())))
+		self.transaction(TransactionID::Location(num.into(), index.value()))
 	}

-	fn transaction_receipt(&self, params: Params) -> Result<Value, Error> {
+	fn transaction_receipt(&self, hash: RpcH256) -> Result<Option<Receipt>, Error> {
 		try!(self.active());
-		from_params::<(RpcH256,)>(params)
-			.and_then(|(hash,)| {
-				let miner = take_weak!(self.miner);
-				let hash: H256 = hash.into();
-				match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) {
-					(Some(receipt), true) => Ok(to_value(&Receipt::from(receipt))),
-					_ => {
-						let client = take_weak!(self.client);
-						let receipt = client.transaction_receipt(TransactionID::Hash(hash));
-						Ok(to_value(&receipt.map(Receipt::from)))
-					}
-				}
-			})
+		let miner = take_weak!(self.miner);
+		let hash: H256 = hash.into();
+		match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) {
+			(Some(receipt), true) => Ok(Some(receipt.into())),
+			_ => {
+				let client = take_weak!(self.client);
+				let receipt = client.transaction_receipt(TransactionID::Hash(hash));
+				Ok(receipt.map(Into::into))
+			}
+		}
 	}

-	fn uncle_by_block_hash_and_index(&self, params: Params) -> Result<Value, Error> {
+	fn uncle_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result<Option<Block>, Error> {
 		try!(self.active());
-		from_params::<(RpcH256, Index)>(params)
-			.and_then(|(hash, index)| self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() }))
+		self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() })
 	}

-	fn uncle_by_block_number_and_index(&self, params: Params) -> Result<Value, Error> {
+	fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result<Option<Block>, Error> {
 		try!(self.active());
-		from_params::<(BlockNumber, Index)>(params)
-			.and_then(|(number, index)| self.uncle(UncleID { block: number.into(), position: index.value() }))
+		self.uncle(UncleID { block: num.into(), position: index.value() })
 	}

-	fn compilers(&self, params: Params) -> Result<Value, Error> {
+	fn compilers(&self) -> Result<Vec<String>, Error> {
 		try!(self.active());
-		try!(expect_no_params(params));

 		let mut compilers = vec![];
 		if Command::new(SOLC).output().is_ok() {
 			compilers.push("solidity".to_owned())
 		}
-		Ok(to_value(&compilers))
+
+		Ok(compilers)
 	}

-	fn logs(&self, params: Params) -> Result<Value, Error> {
-		try!(self.active());
-		let params = match params_len(&params) {
-			1 => from_params::<(Filter, )>(params).map(|(filter, )| (filter, None)),
-			_ => from_params::<(Filter, usize)>(params).map(|(filter, val)| (filter, Some(val))),
-		};
-		params.and_then(|(filter, limit)| {
-			let include_pending = filter.to_block == Some(BlockNumber::Pending);
-			let filter: EthcoreFilter = filter.into();
-			let mut logs = take_weak!(self.client).logs(filter.clone(), limit)
-				.into_iter()
-				.map(From::from)
-				.collect::<Vec<Log>>();
-
-			if include_pending {
-				let pending = pending_logs(&*take_weak!(self.miner), &filter);
-				logs.extend(pending);
-			}
-
-			let len = logs.len();
-			match limit {
-				Some(limit) if len >= limit => {
-					logs = logs.split_off(len - limit);
-				},
-				_ => {},
-			}
-
-			Ok(to_value(&logs))
-		})
-	}
+	fn logs(&self, filter: Filter) -> Result<Vec<Log>, Error> {
+		let include_pending = filter.to_block == Some(BlockNumber::Pending);
+		let filter: EthcoreFilter = filter.into();
+		let mut logs = take_weak!(self.client).logs(filter.clone())
+			.into_iter()
+			.map(From::from)
+			.collect::<Vec<Log>>();
+
+		if include_pending {
+			let pending = pending_logs(&*take_weak!(self.miner), &filter);
+			logs.extend(pending);
+		}
+
+		let logs = limit_logs(logs, filter.limit);
+
+		Ok(logs)
+	}

-	fn work(&self, params: Params) -> Result<Value, Error> {
+	fn work(&self, no_new_work_timeout: Trailing<u64>) -> Result<Work, Error> {
 		try!(self.active());
-		let (no_new_work_timeout,) = from_params::<(u64,)>(params).unwrap_or((0,));
+		let no_new_work_timeout = no_new_work_timeout.0;

 		let client = take_weak!(self.client);
 		// check if we're still syncing and return empty strings in that case
@@ -561,115 +535,118 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
 			if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 {
 				Err(errors::no_new_work())
 			} else if self.options.send_block_number_in_get_work {
-				let block_number = RpcU256::from(b.block().header().number());
-				Ok(to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number)))
+				let block_number = b.block().header().number();
+				Ok(Work {
+					pow_hash: pow_hash.into(),
+					seed_hash: seed_hash.into(),
+					target: target.into(),
+					number: Some(block_number),
+				})
 			} else {
-				Ok(to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target))))
+				Ok(Work {
+					pow_hash: pow_hash.into(),
+					seed_hash: seed_hash.into(),
+					target: target.into(),
+					number: None
+				})
 			}
 		}).unwrap_or(Err(Error::internal_error())) // no work found.
 	}

-	fn submit_work(&self, params: Params) -> Result<Value, Error> {
+	fn submit_work(&self, nonce: RpcH64, pow_hash: RpcH256, mix_hash: RpcH256) -> Result<bool, Error> {
 		try!(self.active());
-		from_params::<(RpcH64, RpcH256, RpcH256)>(params).and_then(|(nonce, pow_hash, mix_hash)| {
-			let nonce: H64 = nonce.into();
-			let pow_hash: H256 = pow_hash.into();
-			let mix_hash: H256 = mix_hash.into();
-			trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash);
-			let miner = take_weak!(self.miner);
-			let client = take_weak!(self.client);
-			let seal = vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()];
-			let r = miner.submit_seal(&*client, pow_hash, seal);
-			Ok(to_value(&r.is_ok()))
-		})
+		let nonce: H64 = nonce.into();
+		let pow_hash: H256 = pow_hash.into();
+		let mix_hash: H256 = mix_hash.into();
+		trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash);
+		let miner = take_weak!(self.miner);
+		let client = take_weak!(self.client);
+		let seal = vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()];
+		Ok(miner.submit_seal(&*client, pow_hash, seal).is_ok())
 	}

-	fn submit_hashrate(&self, params: Params) -> Result<Value, Error> {
+	fn submit_hashrate(&self, rate: RpcU256, id: RpcH256) -> Result<bool, Error> {
 		try!(self.active());
-		from_params::<(RpcU256, RpcH256)>(params).and_then(|(rate, id)| {
-			self.external_miner.submit_hashrate(rate.into(), id.into());
-			Ok(to_value(&true))
-		})
+		self.external_miner.submit_hashrate(rate.into(), id.into());
+		Ok(true)
 	}

-	fn send_raw_transaction(&self, params: Params) -> Result<Value, Error> {
+	fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256, Error> {
 		try!(self.active());
-		from_params::<(Bytes, )>(params)
-			.and_then(|(raw_transaction, )| {
-				let raw_transaction = raw_transaction.to_vec();
-				match UntrustedRlp::new(&raw_transaction).as_val() {
-					Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction),
-					Err(_) => Ok(to_value(&RpcH256::from(H256::from(0)))),
-				}
-			})
+		let raw_transaction = raw.to_vec();
+		match UntrustedRlp::new(&raw_transaction).as_val() {
+			Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction),
+			Err(_) => Ok(RpcH256::from(H256::from(0))),
+		}
 	}

+	fn call(&self, request: CallRequest, num: Trailing<BlockNumber>) -> Result<Bytes, Error> {
+		try!(self.active());
+
+		let request = CallRequest::into(request);
+		let signed = try!(self.sign_call(request));
+
+		let r = match num.0 {
+			BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()),
+			num => take_weak!(self.client).call(&signed, num.into(), Default::default()),
+		};
+
+		Ok(r.map(|e| Bytes(e.output)).unwrap_or(Bytes::new(vec![])))
+	}
+
+	fn estimate_gas(&self, request: CallRequest, num: Trailing<BlockNumber>) -> Result<RpcU256, Error> {
+		try!(self.active());
+
+		let request = CallRequest::into(request);
+		let signed = try!(self.sign_call(request));
+		let r = match num.0 {
+			BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()),
+			num => take_weak!(self.client).call(&signed, num.into(), Default::default()),
+		};
+
+		Ok(RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0))))
+	}
+
+	fn compile_lll(&self, _: String) -> Result<Bytes, Error> {
+		try!(self.active());
+
+		rpc_unimplemented!()
+	}
+
+	fn compile_serpent(&self, _: String) -> Result<Bytes, Error> {
+		try!(self.active());
+
+		rpc_unimplemented!()
+	}
+
+	fn compile_solidity(&self, code: String) -> Result<Bytes, Error> {
+		try!(self.active());
+		let maybe_child = Command::new(SOLC)
+			.arg("--bin")
+			.arg("--optimize")
+			.stdin(Stdio::piped())
+			.stdout(Stdio::piped())
+			.stderr(Stdio::null())
+			.spawn();
+
+		maybe_child
+			.map_err(errors::compilation)
+			.and_then(|mut child| {
+				try!(child.stdin.as_mut()
+					.expect("we called child.stdin(Stdio::piped()) before spawn; qed")
+					.write_all(code.as_bytes())
+					.map_err(errors::compilation));
+				let output = try!(child.wait_with_output().map_err(errors::compilation));
+
+				let s = String::from_utf8_lossy(&output.stdout);
+				if let Some(hex) = s.lines().skip_while(|ref l| !l.contains("Binary")).skip(1).next() {
+					Ok(Bytes::new(hex.from_hex().unwrap_or(vec![])))
+				} else {
+					Err(errors::compilation("Unexpected output."))
+				}
+			})
+	}
-
-	fn call(&self, params: Params) -> Result<Value, Error> {
-		try!(self.active());
-		from_params_default_second(params)
-			.and_then(|(request, block_number,)| {
-				let request = CallRequest::into(request);
-				let signed = try!(self.sign_call(request));
-				let r = match block_number {
-					BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()),
-					block_number => take_weak!(self.client).call(&signed, block_number.into(), Default::default()),
-				};
-				Ok(to_value(&r.map(|e| Bytes(e.output)).unwrap_or(Bytes::new(vec![]))))
-			})
-	}
-
-	fn estimate_gas(&self, params: Params) -> Result<Value, Error> {
-		try!(self.active());
-		from_params_default_second(params)
-			.and_then(|(request, block_number,)| {
-				let request = CallRequest::into(request);
-				let signed = try!(self.sign_call(request));
-				let r = match block_number {
-					BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()),
-					block => take_weak!(self.client).call(&signed, block.into(), Default::default()),
-				};
-				Ok(to_value(&RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0)))))
-			})
-	}
-
-	fn compile_lll(&self, _: Params) -> Result<Value, Error> {
-		try!(self.active());
-		rpc_unimplemented!()
-	}
-
-	fn compile_serpent(&self, _: Params) -> Result<Value, Error> {
-		try!(self.active());
-		rpc_unimplemented!()
-	}
-
-	fn compile_solidity(&self, params: Params) -> Result<Value, Error> {
-		try!(self.active());
-		from_params::<(String, )>(params)
-			.and_then(|(code, )| {
-				let maybe_child = Command::new(SOLC)
-					.arg("--bin")
-					.arg("--optimize")
-					.stdin(Stdio::piped())
-					.stdout(Stdio::piped())
-					.stderr(Stdio::null())
-					.spawn();
-
-				maybe_child
-					.map_err(errors::compilation)
-					.and_then(|mut child| {
-						try!(child.stdin.as_mut()
-							.expect("we called child.stdin(Stdio::piped()) before spawn; qed")
-							.write_all(code.as_bytes())
-							.map_err(errors::compilation));
-						let output = try!(child.wait_with_output().map_err(errors::compilation));
-
-						let s = String::from_utf8_lossy(&output.stdout);
-						if let Some(hex) = s.lines().skip_while(|ref l| !l.contains("Binary")).skip(1).next() {
-							Ok(to_value(&Bytes::new(hex.from_hex().unwrap_or(vec![]))))
-						} else {
-							Err(errors::compilation("Unexpected output."))
-						}
-					})
-			})
-	}
 }
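The hunks above all follow one pattern: handlers that used to take a raw Params value and decode it by hand with from_params, then serialize with to_value, now declare typed arguments and typed return values, with optional trailing parameters carried as Trailing<T>. Below is a minimal, std-only sketch of that before/after shape; Params and Trailing mirror names from the diff, while the toy method and its string error type are illustrative stand-ins, not Parity's real jsonrpc plumbing.

#[derive(Debug)]
struct Params(Vec<String>);  // stand-in for the generic jsonrpc params value
struct Trailing<T>(pub T);   // stand-in for an optional trailing argument

// Old style: every method parses Params itself and serializes by hand.
fn block_number_old(params: Params) -> Result<String, String> {
    let idx: u64 = params.0.get(0)
        .ok_or("missing param")?
        .parse()
        .map_err(|_| "invalid param")?;
    Ok(format!("0x{:x}", idx))
}

// New style: the macro layer does the decoding, so the handler states its
// types directly and a trailing argument defaults cleanly when omitted.
fn block_number_new(idx: u64, timeout: Trailing<u64>) -> Result<String, String> {
    let _timeout = timeout.0; // e.g. defaults to 0 when the caller omits it
    Ok(format!("0x{:x}", idx))
}

fn main() {
    println!("{:?}", block_number_old(Params(vec!["7".into()])));
    println!("{:?}", block_number_new(7, Trailing(0)));
}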
@@ -24,9 +24,8 @@ use ethcore::filter::Filter as EthcoreFilter;
 use ethcore::client::{BlockChainClient, BlockID};
 use util::Mutex;
 use v1::traits::EthFilter;
-use v1::types::{BlockNumber, Index, Filter, Log, H256 as RpcH256, U256 as RpcU256};
-use v1::helpers::{PollFilter, PollManager};
-use v1::helpers::params::expect_no_params;
+use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256};
+use v1::helpers::{PollFilter, PollManager, limit_logs};
 use v1::impls::eth::pending_logs;

 /// Eth filter rpc implementation.
@@ -59,164 +58,154 @@ impl<C, M> EthFilterClient<C, M> where
 	}
 }

-impl<C, M> EthFilter for EthFilterClient<C, M> where
-	C: BlockChainClient + 'static,
-	M: MinerService + 'static {
+impl<C, M> EthFilter for EthFilterClient<C, M>
+	where C: BlockChainClient + 'static, M: MinerService + 'static
+{
-	fn new_filter(&self, params: Params) -> Result<Value, Error> {
+	fn new_filter(&self, filter: Filter) -> Result<RpcU256, Error> {
 		try!(self.active());
-		from_params::<(Filter,)>(params)
-			.and_then(|(filter,)| {
-				let mut polls = self.polls.lock();
-				let block_number = take_weak!(self.client).chain_info().best_block_number;
-				let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter));
-				Ok(to_value(&RpcU256::from(id)))
-			})
+		let mut polls = self.polls.lock();
+		let block_number = take_weak!(self.client).chain_info().best_block_number;
+		let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter));
+		Ok(id.into())
 	}

-	fn new_block_filter(&self, params: Params) -> Result<Value, Error> {
+	fn new_block_filter(&self) -> Result<RpcU256, Error> {
 		try!(self.active());
-		try!(expect_no_params(params));
-
 		let mut polls = self.polls.lock();
 		let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number));
-		Ok(to_value(&RpcU256::from(id)))
+		Ok(id.into())
 	}

-	fn new_pending_transaction_filter(&self, params: Params) -> Result<Value, Error> {
+	fn new_pending_transaction_filter(&self) -> Result<RpcU256, Error> {
 		try!(self.active());
-		try!(expect_no_params(params));
-
 		let mut polls = self.polls.lock();
 		let pending_transactions = take_weak!(self.miner).pending_transactions_hashes();
 		let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions));
-		Ok(to_value(&RpcU256::from(id)))
+		Ok(id.into())
 	}

-	fn filter_changes(&self, params: Params) -> Result<Value, Error> {
+	fn filter_changes(&self, index: Index) -> Result<FilterChanges, Error> {
 		try!(self.active());
 		let client = take_weak!(self.client);
-		from_params::<(Index,)>(params)
-			.and_then(|(index,)| {
-				let mut polls = self.polls.lock();
-				match polls.poll_mut(&index.value()) {
-					None => Ok(Value::Array(vec![] as Vec<Value>)),
-					Some(filter) => match *filter {
+		let mut polls = self.polls.lock();
+		match polls.poll_mut(&index.value()) {
+			None => Ok(FilterChanges::Empty),
+			Some(filter) => match *filter {
 				PollFilter::Block(ref mut block_number) => {
 					// + 1, cause we want to return hashes including current block hash.
 					let current_number = client.chain_info().best_block_number + 1;
 					let hashes = (*block_number..current_number).into_iter()
 						.map(BlockID::Number)
 						.filter_map(|id| client.block_hash(id))
 						.map(Into::into)
 						.collect::<Vec<RpcH256>>();

 					*block_number = current_number;

-					Ok(to_value(&hashes))
+					Ok(FilterChanges::Hashes(hashes))
 				},
 				PollFilter::PendingTransaction(ref mut previous_hashes) => {
 					// get hashes of pending transactions
 					let current_hashes = take_weak!(self.miner).pending_transactions_hashes();

 					let new_hashes =
 					{
 						let previous_hashes_set = previous_hashes.iter().collect::<HashSet<_>>();

 						// find all new hashes
 						current_hashes
 							.iter()
 							.filter(|hash| !previous_hashes_set.contains(hash))
 							.cloned()
 							.map(Into::into)
 							.collect::<Vec<RpcH256>>()
 					};

 					// save all hashes of pending transactions
 					*previous_hashes = current_hashes;

 					// return new hashes
-					Ok(to_value(&new_hashes))
+					Ok(FilterChanges::Hashes(new_hashes))
 				},
 				PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => {
 					// retrive the current block number
 					let current_number = client.chain_info().best_block_number;

 					// check if we need to check pending hashes
 					let include_pending = filter.to_block == Some(BlockNumber::Pending);

 					// build appropriate filter
 					let mut filter: EthcoreFilter = filter.clone().into();
 					filter.from_block = BlockID::Number(*block_number);
 					filter.to_block = BlockID::Latest;

 					// retrieve logs in range from_block..min(BlockID::Latest..to_block)
-					let mut logs = client.logs(filter.clone(), None)
+					let mut logs = client.logs(filter.clone())
 						.into_iter()
 						.map(From::from)
 						.collect::<Vec<Log>>();

 					// additionally retrieve pending logs
 					if include_pending {
 						let pending_logs = pending_logs(&*take_weak!(self.miner), &filter);

 						// remove logs about which client was already notified about
 						let new_pending_logs: Vec<_> = pending_logs.iter()
 							.filter(|p| !previous_logs.contains(p))
 							.cloned()
 							.collect();

 						// save all logs retrieved by client
 						*previous_logs = pending_logs.into_iter().collect();

 						// append logs array with new pending logs
 						logs.extend(new_pending_logs);
 					}

+					let logs = limit_logs(logs, filter.limit);
+
 					// save the number of the next block as a first block from which
 					// we want to get logs
 					*block_number = current_number + 1;

-					Ok(to_value(&logs))
+					Ok(FilterChanges::Logs(logs))
 				}
 			}
-			})
+		}
 	}

-	fn filter_logs(&self, params: Params) -> Result<Value, Error> {
+	fn filter_logs(&self, index: Index) -> Result<Vec<Log>, Error> {
 		try!(self.active());
-		from_params::<(Index,)>(params)
-			.and_then(|(index,)| {
-				let mut polls = self.polls.lock();
-				match polls.poll(&index.value()) {
+		let mut polls = self.polls.lock();
+		match polls.poll(&index.value()) {
 			Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => {
 				let include_pending = filter.to_block == Some(BlockNumber::Pending);
 				let filter: EthcoreFilter = filter.clone().into();
-				let mut logs = take_weak!(self.client).logs(filter.clone(), None)
+				let mut logs = take_weak!(self.client).logs(filter.clone())
 					.into_iter()
 					.map(From::from)
 					.collect::<Vec<Log>>();

 				if include_pending {
 					logs.extend(pending_logs(&*take_weak!(self.miner), &filter));
 				}

-				Ok(to_value(&logs))
+				let logs = limit_logs(logs, filter.limit);
+
+				Ok(logs)
 			},
 			// just empty array
-			_ => Ok(Value::Array(vec![] as Vec<Value>)),
-				}
-			})
+			_ => Ok(Vec::new()),
+		}
 	}

-	fn uninstall_filter(&self, params: Params) -> Result<Value, Error> {
+	fn uninstall_filter(&self, index: Index) -> Result<bool, Error> {
 		try!(self.active());
-		from_params::<(Index,)>(params)
-			.map(|(index,)| {
-				self.polls.lock().remove_poll(&index.value());
-				to_value(&true)
-			})
+		self.polls.lock().remove_poll(&index.value());
+		Ok(true)
 	}
 }
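The old logs() body capped results inline with split_off; the new code routes eth_getLogs, getFilterChanges and getFilterLogs through the limit_logs helper instead. Below is a stand-alone sketch of the semantics implied by the removed code (keep only the newest limit entries when a limit is set, and everything otherwise); the real helper lives in v1::helpers and may differ in detail.

// Sketch of the limit behaviour implied by the code this diff removes
// (`logs.split_off(len - limit)` when `len >= limit`); illustrative only.
fn limit_logs<T>(mut logs: Vec<T>, limit: Option<usize>) -> Vec<T> {
    let len = logs.len();
    match limit {
        Some(limit) if len >= limit => logs.split_off(len - limit), // keep the newest `limit` entries
        _ => logs,
    }
}

fn main() {
    let logs = vec![1, 2, 3, 4];
    assert_eq!(limit_logs(logs.clone(), Some(2)), vec![3, 4]);
    assert_eq!(limit_logs(logs.clone(), Some(0)), Vec::<i32>::new()); // matches the `"limit":0` test below
    assert_eq!(limit_logs(logs, None), vec![1, 2, 3, 4]);
}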
@@ -23,10 +23,10 @@ use ethcore::client::MiningBlockChainClient;
 use util::{U256, Address, H256, Mutex};
 use transient_hashmap::TransientHashMap;
 use ethcore::account_provider::AccountProvider;
-use v1::helpers::{errors, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationsQueue, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest};
+use v1::helpers::{errors, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest, SignerService};
 use v1::helpers::dispatch::{default_gas_price, sign_and_dispatch};
 use v1::traits::EthSigning;
-use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256};
+use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256, Bytes as RpcBytes};

 fn fill_optional_fields<C, M>(request: TRequest, client: &C, miner: &M) -> FilledRequest
 	where C: MiningBlockChainClient, M: MinerService {
@@ -43,7 +43,7 @@ fn fill_optional_fields<C, M>(request: TRequest, client: &C, miner: &M) -> Fille

 /// Implementation of functions that require signing when no trusted signer is used.
 pub struct EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: MinerService {
-	queue: Weak<ConfirmationsQueue>,
+	signer: Weak<SignerService>,
 	accounts: Weak<AccountProvider>,
 	client: Weak<C>,
 	miner: Weak<M>,
@@ -60,9 +60,9 @@ pub enum DispatchResult {

 impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: MinerService {
 	/// Creates a new signing queue client given shared signing queue.
-	pub fn new(queue: &Arc<ConfirmationsQueue>, client: &Arc<C>, miner: &Arc<M>, accounts: &Arc<AccountProvider>) -> Self {
+	pub fn new(signer: &Arc<SignerService>, client: &Arc<C>, miner: &Arc<M>, accounts: &Arc<AccountProvider>) -> Self {
 		EthSigningQueueClient {
-			queue: Arc::downgrade(queue),
+			signer: Arc::downgrade(signer),
 			accounts: Arc::downgrade(accounts),
 			client: Arc::downgrade(client),
 			miner: Arc::downgrade(miner),
@@ -86,8 +86,8 @@ impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: Miner
 			return Ok(DispatchResult::Value(to_value(&accounts.sign(address, msg).ok().map_or_else(RpcH520::default, Into::into))))
 		}

-		let queue = take_weak!(self.queue);
-		queue.add_request(ConfirmationPayload::Sign(address, msg))
+		let signer = take_weak!(self.signer);
+		signer.add_request(ConfirmationPayload::Sign(address, msg))
 			.map(DispatchResult::Promise)
 			.map_err(|_| errors::request_rejected_limit())
 		})
@@ -105,9 +105,9 @@ impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: Miner
 			return sign_and_dispatch(&*client, &*miner, request, &*accounts, sender).map(DispatchResult::Value);
 		}

-		let queue = take_weak!(self.queue);
+		let signer = take_weak!(self.signer);
 		let request = fill_optional_fields(request, &*client, &*miner);
-		queue.add_request(ConfirmationPayload::Transaction(request))
+		signer.add_request(ConfirmationPayload::Transaction(request))
 			.map(DispatchResult::Promise)
 			.map_err(|_| errors::request_rejected_limit())
 		})
@@ -168,6 +168,13 @@ impl<C, M> EthSigning for EthSigningQueueClient<C, M>
 		})
 	}

+	fn decrypt_message(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		from_params::<(RpcH160, RpcBytes)>(params).and_then(|(_account, _ciphertext)| {
+			Err(errors::unimplemented())
+		})
+	}
+
 	fn check_request(&self, params: Params) -> Result<Value, Error> {
 		try!(self.active());
 		let mut pending = self.pending.lock();
@@ -241,6 +248,14 @@ impl<C, M> EthSigning for EthSigningUnsafeClient<C, M> where
 		}))
 	}

+	fn decrypt_message(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		from_params::<(RpcH160, RpcBytes)>(params).and_then(|(address, ciphertext)| {
+			let s = try!(take_weak!(self.accounts).decrypt(address.into(), &[0; 0], &ciphertext.0).map_err(|_| Error::internal_error()));
+			Ok(to_value(RpcBytes::from(s)))
+		})
+	}
+
 	fn post_sign(&self, _: Params) -> Result<Value, Error> {
 		// We don't support this in non-signer mode.
 		Err(errors::signer_disabled())
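Both signing clients keep the same two-path shape: when the account is already unlocked the request resolves immediately as a DispatchResult::Value, otherwise it is queued on the SignerService and a DispatchResult::Promise comes back for later confirmation. The toy model below only illustrates consuming such a result; the variant names come from the diff, while the payload types are simplified assumptions, not the real ones.

// Toy model of the two-path dispatch: immediate response vs. queued request.
enum DispatchResult {
    Value(String), // already-signed response, returned synchronously
    Promise(u64),  // id of a request parked on the signer queue
}

fn respond(result: DispatchResult) -> String {
    match result {
        DispatchResult::Value(v) => v,
        DispatchResult::Promise(id) => format!("pending confirmation #{}", id),
    }
}

fn main() {
    println!("{}", respond(DispatchResult::Value("0xsigned".into())));
    println!("{}", respond(DispatchResult::Promise(1)));
}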
@@ -21,6 +21,7 @@ use std::collections::{BTreeMap};
 use util::{RotatingLogger, Address};
 use util::misc::version_data;

+use crypto::ecies;
 use ethkey::{Brain, Generator};
 use ethstore::random_phrase;
 use ethsync::{SyncProvider, ManageNetwork};
@@ -29,8 +30,8 @@ use ethcore::client::{MiningBlockChainClient};

 use jsonrpc_core::*;
 use v1::traits::Ethcore;
-use v1::types::{Bytes, U256, H160, Peers};
-use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, NetworkSettings};
+use v1::types::{Bytes, U256, H160, H512, Peers, Transaction};
+use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings};
 use v1::helpers::params::expect_no_params;

 /// Ethcore implementation.
@@ -45,7 +46,7 @@ pub struct EthcoreClient<C, M, S: ?Sized> where
 	net: Weak<ManageNetwork>,
 	logger: Arc<RotatingLogger>,
 	settings: Arc<NetworkSettings>,
-	confirmations_queue: Option<Arc<ConfirmationsQueue>>,
+	signer: Option<Arc<SignerService>>,
 }

 impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M: MinerService, S: SyncProvider {
@@ -57,7 +58,7 @@ impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M:
 		net: &Arc<ManageNetwork>,
 		logger: Arc<RotatingLogger>,
 		settings: Arc<NetworkSettings>,
-		queue: Option<Arc<ConfirmationsQueue>>
+		signer: Option<Arc<SignerService>>
 	) -> Self {
 		EthcoreClient {
 			client: Arc::downgrade(client),
@@ -66,7 +67,7 @@ impl<C, M, S: ?Sized> EthcoreClient<C, M, S> where C: MiningBlockChainClient, M:
 			net: Arc::downgrade(net),
 			logger: logger,
 			settings: settings,
-			confirmations_queue: queue,
+			signer: signer,
 		}
 	}

@@ -198,9 +199,9 @@ impl<C, M, S: ?Sized> Ethcore for EthcoreClient<C, M, S> where M: MinerService +
 		try!(self.active());
 		try!(expect_no_params(params));

-		match self.confirmations_queue {
+		match self.signer {
 			None => Err(errors::signer_disabled()),
-			Some(ref queue) => Ok(to_value(&queue.len())),
+			Some(ref signer) => Ok(to_value(&signer.len())),
 		}
 	}

@@ -217,4 +218,19 @@ impl<C, M, S: ?Sized> Ethcore for EthcoreClient<C, M, S> where M: MinerService +
 			to_value(&H160::from(Brain::new(phrase).generate().unwrap().address()))
 		)
 	}
+
+	fn encrypt_message(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| {
+			let s = try!(ecies::encrypt(&key.into(), &[0; 0], &phrase.0).map_err(|_| Error::internal_error()));
+			Ok(to_value(&Bytes::from(s)))
+		})
+	}
+
+	fn pending_transactions(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		try!(expect_no_params(params));
+
+		Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::<Vec<Transaction>>()))
+	}
 }
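Signer-dependent Ethcore RPCs are gated on the optional SignerService: with no signer configured they answer with the signer-disabled error, otherwise they delegate to it, as unsigned_transactions_count does above. A small self-contained sketch of that gating pattern, with a toy signer standing in for the real service:

use std::sync::Arc;

// Toy stand-ins: the real types are SignerService and EthcoreClient.
struct ToySigner { queued: usize }
impl ToySigner { fn len(&self) -> usize { self.queued } }

struct EthcoreLike { signer: Option<Arc<ToySigner>> }

impl EthcoreLike {
    // Mirrors the match in unsigned_transactions_count: no signer, no answer.
    fn unsigned_transactions_count(&self) -> Result<usize, &'static str> {
        match self.signer {
            None => Err("signer is disabled"),
            Some(ref signer) => Ok(signer.len()),
        }
    }
}

fn main() {
    let enabled = EthcoreLike { signer: Some(Arc::new(ToySigner { queued: 2 })) };
    let disabled = EthcoreLike { signer: None };
    assert_eq!(enabled.unsigned_transactions_count(), Ok(2));
    assert!(disabled.unsigned_transactions_count().is_err());
}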
@@ -23,13 +23,13 @@ use ethcore::client::MiningBlockChainClient;
 use ethcore::miner::MinerService;
 use v1::traits::PersonalSigner;
 use v1::types::{TransactionModification, ConfirmationRequest, U256};
-use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, ConfirmationPayload};
+use v1::helpers::{errors, SignerService, SigningQueue, ConfirmationPayload};
 use v1::helpers::params::expect_no_params;
 use v1::helpers::dispatch::{unlock_sign_and_dispatch, signature_with_password};

 /// Transactions confirmation (personal) rpc implementation.
 pub struct SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
-	queue: Weak<ConfirmationsQueue>,
+	signer: Weak<SignerService>,
 	accounts: Weak<AccountProvider>,
 	client: Weak<C>,
 	miner: Weak<M>,
@@ -38,9 +38,14 @@ pub struct SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
 impl<C: 'static, M: 'static> SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {

 	/// Create new instance of signer client.
-	pub fn new(store: &Arc<AccountProvider>, client: &Arc<C>, miner: &Arc<M>, queue: &Arc<ConfirmationsQueue>) -> Self {
+	pub fn new(
+		store: &Arc<AccountProvider>,
+		client: &Arc<C>,
+		miner: &Arc<M>,
+		signer: &Arc<SignerService>,
+	) -> Self {
 		SignerClient {
-			queue: Arc::downgrade(queue),
+			signer: Arc::downgrade(signer),
 			accounts: Arc::downgrade(store),
 			client: Arc::downgrade(client),
 			miner: Arc::downgrade(miner),
@@ -59,8 +64,8 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
 	fn requests_to_confirm(&self, params: Params) -> Result<Value, Error> {
 		try!(self.active());
 		try!(expect_no_params(params));
-		let queue = take_weak!(self.queue);
-		Ok(to_value(&queue.requests().into_iter().map(From::from).collect::<Vec<ConfirmationRequest>>()))
+		let signer = take_weak!(self.signer);
+		Ok(to_value(&signer.requests().into_iter().map(From::from).collect::<Vec<ConfirmationRequest>>()))
 	}

 	fn confirm_request(&self, params: Params) -> Result<Value, Error> {
@@ -71,11 +76,11 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
 			|(id, modification, pass)| {
 				let id = id.into();
 				let accounts = take_weak!(self.accounts);
-				let queue = take_weak!(self.queue);
+				let signer = take_weak!(self.signer);
 				let client = take_weak!(self.client);
 				let miner = take_weak!(self.miner);

-				queue.peek(&id).map(|confirmation| {
+				signer.peek(&id).map(|confirmation| {
 					let result = match confirmation.payload {
 						ConfirmationPayload::Transaction(mut request) => {
 							// apply modification
@@ -90,7 +95,7 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
 						}
 					};
 					if let Ok(ref response) = result {
-						queue.request_confirmed(id, Ok(response.clone()));
+						signer.request_confirmed(id, Ok(response.clone()));
 					}
 					result
 				}).unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id)))
@@ -102,11 +107,20 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
 		try!(self.active());
 		from_params::<(U256, )>(params).and_then(
 			|(id, )| {
-				let queue = take_weak!(self.queue);
-				let res = queue.request_rejected(id.into());
+				let signer = take_weak!(self.signer);
+				let res = signer.request_rejected(id.into());
 				Ok(to_value(&res.is_some()))
 			}
 		)
 	}
+
+	fn generate_token(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		try!(expect_no_params(params));
+		let signer = take_weak!(self.signer);
+		signer.generate_token()
+			.map(|token| to_value(&token))
+			.map_err(|e| errors::token(e))
+	}
 }
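confirm_request and reject_request drive the signer queue through peek, request_confirmed and request_rejected. The toy queue below is an assumed, simplified model of that interface, enough to show how confirming a peeked request resolves it and how an unknown id falls through to the "Unknown RequestID" error path; it is not the real SignerService.

use std::collections::HashMap;

// Assumed, simplified model of the confirmation-queue calls seen above.
struct ToyQueue {
    pending: HashMap<u64, String>,              // id -> payload description
    resolved: HashMap<u64, Result<String, ()>>, // id -> outcome for the waiting promise
}

impl ToyQueue {
    fn new() -> Self { ToyQueue { pending: HashMap::new(), resolved: HashMap::new() } }
    fn add_request(&mut self, id: u64, payload: &str) { self.pending.insert(id, payload.to_owned()); }
    fn peek(&self, id: &u64) -> Option<&String> { self.pending.get(id) }
    fn request_confirmed(&mut self, id: u64, response: String) {
        self.pending.remove(&id);
        self.resolved.insert(id, Ok(response));
    }
    fn request_rejected(&mut self, id: u64) -> Option<String> {
        let removed = self.pending.remove(&id);
        if removed.is_some() {
            self.resolved.insert(id, Err(()));
        }
        removed
    }
}

fn main() {
    let mut queue = ToyQueue::new();
    queue.add_request(1, "send 10 wei to the recipient");
    if queue.peek(&1).is_some() {
        queue.request_confirmed(1, "0xresponse".to_owned());
    }
    // Unknown id: mirrors the "Unknown RequestID" error path above.
    assert!(queue.request_rejected(2).is_none());
    println!("resolved requests: {}", queue.resolved.len());
}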
@@ -28,4 +28,4 @@ pub mod types;
 pub use self::traits::{Web3, Eth, EthFilter, EthSigning, Personal, PersonalSigner, Net, Ethcore, EthcoreSet, Traces, Rpc};
 pub use self::impls::*;
-pub use self::helpers::{SigningQueue, ConfirmationsQueue, NetworkSettings};
+pub use self::helpers::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings};
@@ -27,7 +27,7 @@ use ethcore::receipt::LocalizedReceipt;
 use ethcore::transaction::{Transaction, Action};
 use ethcore::miner::{ExternalMiner, MinerService};
 use ethsync::SyncState;
-use v1::{Eth, EthClient, EthClientOptions, EthSigning, EthSigningUnsafeClient};
+use v1::{Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, EthSigning, EthSigningUnsafeClient};
 use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService};
 use rustc_serialize::hex::ToHex;
 use time::get_time;
@@ -76,10 +76,12 @@ impl EthTester {
 		let hashrates = Arc::new(Mutex::new(HashMap::new()));
 		let external_miner = Arc::new(ExternalMiner::new(hashrates.clone()));
 		let eth = EthClient::new(&client, &sync, &ap, &miner, &external_miner, options).to_delegate();
+		let filter = EthFilterClient::new(&client, &miner).to_delegate();
 		let sign = EthSigningUnsafeClient::new(&client, &ap, &miner).to_delegate();
 		let io = IoHandler::new();
 		io.add_delegate(eth);
 		io.add_delegate(sign);
+		io.add_delegate(filter);

 		EthTester {
 			client: client,
@@ -152,23 +154,88 @@ fn rpc_eth_hashrate() {
 #[test]
 fn rpc_eth_logs() {
 	let tester = EthTester::default();
+	tester.client.set_logs(vec![LocalizedLogEntry {
+		block_number: 1,
+		block_hash: H256::default(),
+		entry: LogEntry {
+			address: Address::default(),
+			topics: vec![],
+			data: vec![1,2,3],
+		},
+		transaction_index: 0,
+		transaction_hash: H256::default(),
+		log_index: 0,
+	}, LocalizedLogEntry {
+		block_number: 1,
+		block_hash: H256::default(),
+		entry: LogEntry {
+			address: Address::default(),
+			topics: vec![],
+			data: vec![1,2,3],
+		},
+		transaction_index: 0,
+		transaction_hash: H256::default(),
+		log_index: 0,
+	}]);

-	let request = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#;
-	let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;
-
-	assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned()));
+	let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#;
+	let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1}], "id": 1}"#;
+	let request3 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":0}], "id": 1}"#;
+
+	let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#;
+	let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#;
+	let response3 = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;
+
+	assert_eq!(tester.io.handle_request_sync(request1), Some(response1.to_owned()));
+	assert_eq!(tester.io.handle_request_sync(request2), Some(response2.to_owned()));
+	assert_eq!(tester.io.handle_request_sync(request3), Some(response3.to_owned()));
 }

 #[test]
-fn rpc_eth_logs_with_limit() {
+fn rpc_logs_filter() {
 	let tester = EthTester::default();
+	// Set some logs
+	tester.client.set_logs(vec![LocalizedLogEntry {
+		block_number: 1,
+		block_hash: H256::default(),
+		entry: LogEntry {
+			address: Address::default(),
+			topics: vec![],
+			data: vec![1,2,3],
+		},
+		transaction_index: 0,
+		transaction_hash: H256::default(),
+		log_index: 0,
+	}, LocalizedLogEntry {
+		block_number: 1,
+		block_hash: H256::default(),
+		entry: LogEntry {
+			address: Address::default(),
+			topics: vec![],
+			data: vec![1,2,3],
+		},
+		transaction_index: 0,
+		transaction_hash: H256::default(),
+		log_index: 0,
+	}]);

-	let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}, 1], "id": 1}"#;
-	let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}, 0], "id": 1}"#;
-	let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;
-
-	assert_eq!(tester.io.handle_request_sync(request1), Some(response.to_owned()));
-	assert_eq!(tester.io.handle_request_sync(request2), Some(response.to_owned()));
+	// Register filters first
+	let request_default = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{}], "id": 1}"#;
+	let request_limit = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{"limit":1}], "id": 1}"#;
+	let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#;
+	let response2 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#;
+	assert_eq!(tester.io.handle_request_sync(request_default), Some(response1.to_owned()));
+	assert_eq!(tester.io.handle_request_sync(request_limit), Some(response2.to_owned()));
+
+	let request_changes1 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#;
+	let request_changes2 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x1"], "id": 1}"#;
+	let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#;
+	let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#;
+
+	assert_eq!(tester.io.handle_request_sync(request_changes1), Some(response1.to_owned()));
+	assert_eq!(tester.io.handle_request_sync(request_changes2), Some(response2.to_owned()));
 }

 #[test]
@@ -390,7 +457,7 @@ fn rpc_eth_pending_transaction_by_hash() {
 		tester.miner.pending_transactions.lock().insert(H256::zero(), tx);
 	}

-	let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0xa"},"id":1}"#;
+	let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0xa"},"id":1}"#;
 	let request = r#"{
 		"jsonrpc": "2.0",
 		"method": "eth_getTransactionByHash",
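The expected responses in these fixtures follow the JSON-RPC hex conventions: quantities are "0x"-prefixed with no leading zeros (block number 1 serializes as "0x1"), while byte arrays keep their padding (the data vec![1, 2, 3] set via set_logs becomes "0x010203"). A std-only illustration of that encoding; the helper names here are made up for the example.

fn encode_quantity(n: u64) -> String {
    // Quantities drop leading zeros: 1 -> "0x1".
    format!("0x{:x}", n)
}

fn encode_bytes(data: &[u8]) -> String {
    // Byte arrays keep per-byte zero padding: [1, 2, 3] -> "0x010203".
    let hex: String = data.iter().map(|b| format!("{:02x}", b)).collect();
    format!("0x{}", hex)
}

fn main() {
    assert_eq!(encode_quantity(1), "0x1");            // blockNumber in the expected responses
    assert_eq!(encode_bytes(&[1, 2, 3]), "0x010203"); // the `data` field in the expected responses
    println!("ok");
}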
@ -19,7 +19,7 @@ use std::sync::Arc;
|
|||||||
use jsonrpc_core::{IoHandler, to_value};
|
use jsonrpc_core::{IoHandler, to_value};
|
||||||
use v1::impls::EthSigningQueueClient;
|
use v1::impls::EthSigningQueueClient;
|
||||||
use v1::traits::EthSigning;
|
use v1::traits::EthSigning;
|
||||||
use v1::helpers::{ConfirmationsQueue, SigningQueue};
|
use v1::helpers::{SignerService, SigningQueue};
|
||||||
use v1::types::{H256 as RpcH256, H520 as RpcH520};
|
use v1::types::{H256 as RpcH256, H520 as RpcH520};
|
||||||
use v1::tests::helpers::TestMinerService;
|
use v1::tests::helpers::TestMinerService;
|
||||||
use util::{Address, FixedHash, Uint, U256, H256, H520};
|
use util::{Address, FixedHash, Uint, U256, H256, H520};
|
||||||
@ -28,7 +28,7 @@ use ethcore::client::TestBlockChainClient;
|
|||||||
use ethcore::transaction::{Transaction, Action};
|
use ethcore::transaction::{Transaction, Action};
|
||||||
|
|
||||||
struct EthSigningTester {
|
struct EthSigningTester {
|
||||||
pub queue: Arc<ConfirmationsQueue>,
|
pub signer: Arc<SignerService>,
|
||||||
pub client: Arc<TestBlockChainClient>,
|
pub client: Arc<TestBlockChainClient>,
|
||||||
pub miner: Arc<TestMinerService>,
|
pub miner: Arc<TestMinerService>,
|
||||||
pub accounts: Arc<AccountProvider>,
|
pub accounts: Arc<AccountProvider>,
|
||||||
@ -37,15 +37,15 @@ struct EthSigningTester {
|
|||||||
|
|
||||||
impl Default for EthSigningTester {
|
impl Default for EthSigningTester {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
let queue = Arc::new(ConfirmationsQueue::default());
|
let signer = Arc::new(SignerService::new_test());
|
||||||
let client = Arc::new(TestBlockChainClient::default());
|
let client = Arc::new(TestBlockChainClient::default());
|
||||||
let miner = Arc::new(TestMinerService::default());
|
let miner = Arc::new(TestMinerService::default());
|
||||||
let accounts = Arc::new(AccountProvider::transient_provider());
|
let accounts = Arc::new(AccountProvider::transient_provider());
|
||||||
 let io = IoHandler::new();
-io.add_delegate(EthSigningQueueClient::new(&queue, &client, &miner, &accounts).to_delegate());
+io.add_delegate(EthSigningQueueClient::new(&signer, &client, &miner, &accounts).to_delegate());

 EthSigningTester {
-queue: queue,
+signer: signer,
 client: client,
 miner: miner,
 accounts: accounts,
@@ -63,7 +63,7 @@ fn should_add_sign_to_queue() {
 // given
 let tester = eth_signing();
 let address = Address::random();
-assert_eq!(tester.queue.requests().len(), 0);
+assert_eq!(tester.signer.requests().len(), 0);

 // when
 let request = r#"{
@@ -79,9 +79,9 @@ fn should_add_sign_to_queue() {

 // then
 let async_result = tester.io.handle_request(&request).unwrap();
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);
 // respond
-tester.queue.request_confirmed(U256::from(1), Ok(to_value(&RpcH520::from(H520::default()))));
+tester.signer.request_confirmed(U256::from(1), Ok(to_value(&RpcH520::from(H520::default()))));
 assert!(async_result.on_result(move |res| {
 assert_eq!(res, response.to_owned());
 }));
@@ -92,7 +92,7 @@ fn should_post_sign_to_queue() {
 // given
 let tester = eth_signing();
 let address = Address::random();
-assert_eq!(tester.queue.requests().len(), 0);
+assert_eq!(tester.signer.requests().len(), 0);

 // when
 let request = r#"{
@@ -108,7 +108,7 @@ fn should_post_sign_to_queue() {

 // then
 assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);
 }

 #[test]
@@ -155,7 +155,7 @@ fn should_check_status_of_request_when_its_resolved() {
 "id": 1
 }"#;
 tester.io.handle_request_sync(&request).expect("Sent");
-tester.queue.request_confirmed(U256::from(1), Ok(to_value(&"Hello World!")));
+tester.signer.request_confirmed(U256::from(1), Ok(to_value(&"Hello World!")));

 // when
 let request = r#"{
@@ -192,7 +192,7 @@ fn should_sign_if_account_is_unlocked() {
 }"#;
 let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{}", signature).as_ref() + r#"","id":1}"#;
 assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
-assert_eq!(tester.queue.requests().len(), 0);
+assert_eq!(tester.signer.requests().len(), 0);
 }

 #[test]
@@ -200,7 +200,7 @@ fn should_add_transaction_to_queue() {
 // given
 let tester = eth_signing();
 let address = Address::random();
-assert_eq!(tester.queue.requests().len(), 0);
+assert_eq!(tester.signer.requests().len(), 0);

 // when
 let request = r#"{
@@ -219,9 +219,9 @@ fn should_add_transaction_to_queue() {

 // then
 let async_result = tester.io.handle_request(&request).unwrap();
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);
 // respond
-tester.queue.request_confirmed(U256::from(1), Ok(to_value(&RpcH256::from(H256::default()))));
+tester.signer.request_confirmed(U256::from(1), Ok(to_value(&RpcH256::from(H256::default()))));
 assert!(async_result.on_result(move |res| {
 assert_eq!(res, response.to_owned());
 }));
@@ -22,7 +22,7 @@ use ethcore::client::{TestBlockChainClient};

 use jsonrpc_core::IoHandler;
 use v1::{Ethcore, EthcoreClient};
-use v1::helpers::{ConfirmationsQueue, NetworkSettings};
+use v1::helpers::{SignerService, NetworkSettings};
 use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService};
 use super::manage_network::TestManageNetwork;

@@ -262,8 +262,8 @@ fn rpc_ethcore_unsigned_transactions_count() {
 let sync = sync_provider();
 let net = network_service();
 let io = IoHandler::new();
-let queue = Arc::new(ConfirmationsQueue::default());
-let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(queue)).to_delegate();
+let signer = Arc::new(SignerService::new_test());
+let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(signer)).to_delegate();
 io.add_delegate(ethcore);

 let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#;
@@ -286,3 +286,18 @@ fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() {

 assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
 }
+
+#[test]
+fn rpc_ethcore_pending_transactions() {
+let miner = miner_service();
+let client = client_service();
+let sync = sync_provider();
+let net = network_service();
+let io = IoHandler::new();
+io.add_delegate(ethcore_client(&client, &miner, &sync, &net).to_delegate());
+
+let request = r#"{"jsonrpc": "2.0", "method": "ethcore_pendingTransactions", "params":[], "id": 1}"#;
+let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;
+
+assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
+}
@@ -23,10 +23,10 @@ use ethcore::client::TestBlockChainClient;
 use ethcore::transaction::{Transaction, Action};
 use v1::{SignerClient, PersonalSigner};
 use v1::tests::helpers::TestMinerService;
-use v1::helpers::{SigningQueue, ConfirmationsQueue, FilledTransactionRequest, ConfirmationPayload};
+use v1::helpers::{SigningQueue, SignerService, FilledTransactionRequest, ConfirmationPayload};

 struct PersonalSignerTester {
-queue: Arc<ConfirmationsQueue>,
+signer: Arc<SignerService>,
 accounts: Arc<AccountProvider>,
 io: IoHandler,
 miner: Arc<TestMinerService>,
@@ -49,16 +49,16 @@ fn miner_service() -> Arc<TestMinerService> {
 }

 fn signer_tester() -> PersonalSignerTester {
-let queue = Arc::new(ConfirmationsQueue::default());
+let signer = Arc::new(SignerService::new_test());
 let accounts = accounts_provider();
 let client = blockchain_client();
 let miner = miner_service();

 let io = IoHandler::new();
-io.add_delegate(SignerClient::new(&accounts, &client, &miner, &queue).to_delegate());
+io.add_delegate(SignerClient::new(&accounts, &client, &miner, &signer).to_delegate());

 PersonalSignerTester {
-queue: queue,
+signer: signer,
 accounts: accounts,
 io: io,
 miner: miner,
@@ -71,7 +71,7 @@ fn signer_tester() -> PersonalSignerTester {
 fn should_return_list_of_items_to_confirm() {
 // given
 let tester = signer_tester();
-tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
+tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
 from: Address::from(1),
 to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
 gas_price: U256::from(10_000),
@@ -80,7 +80,7 @@ fn should_return_list_of_items_to_confirm() {
 data: vec![],
 nonce: None,
 })).unwrap();
-tester.queue.add_request(ConfirmationPayload::Sign(1.into(), 5.into())).unwrap();
+tester.signer.add_request(ConfirmationPayload::Sign(1.into(), 5.into())).unwrap();

 // when
 let request = r#"{"jsonrpc":"2.0","method":"personal_requestsToConfirm","params":[],"id":1}"#;
@@ -100,7 +100,7 @@ fn should_return_list_of_items_to_confirm() {
 fn should_reject_transaction_from_queue_without_dispatching() {
 // given
 let tester = signer_tester();
-tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
+tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
 from: Address::from(1),
 to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
 gas_price: U256::from(10_000),
@@ -109,7 +109,7 @@ fn should_reject_transaction_from_queue_without_dispatching() {
 data: vec![],
 nonce: None,
 })).unwrap();
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);

 // when
 let request = r#"{"jsonrpc":"2.0","method":"personal_rejectRequest","params":["0x1"],"id":1}"#;
@@ -117,7 +117,7 @@ fn should_reject_transaction_from_queue_without_dispatching() {

 // then
 assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
-assert_eq!(tester.queue.requests().len(), 0);
+assert_eq!(tester.signer.requests().len(), 0);
 assert_eq!(tester.miner.imported_transactions.lock().len(), 0);
 }

@@ -125,7 +125,7 @@ fn should_reject_transaction_from_queue_without_dispatching() {
 fn should_not_remove_transaction_if_password_is_invalid() {
 // given
 let tester = signer_tester();
-tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
+tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
 from: Address::from(1),
 to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
 gas_price: U256::from(10_000),
@@ -134,7 +134,7 @@ fn should_not_remove_transaction_if_password_is_invalid() {
 data: vec![],
 nonce: None,
 })).unwrap();
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);

 // when
 let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#;
@@ -142,15 +142,15 @@ fn should_not_remove_transaction_if_password_is_invalid() {

 // then
 assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);
 }

 #[test]
 fn should_not_remove_sign_if_password_is_invalid() {
 // given
 let tester = signer_tester();
-tester.queue.add_request(ConfirmationPayload::Sign(0.into(), 5.into())).unwrap();
-assert_eq!(tester.queue.requests().len(), 1);
+tester.signer.add_request(ConfirmationPayload::Sign(0.into(), 5.into())).unwrap();
+assert_eq!(tester.signer.requests().len(), 1);

 // when
 let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#;
@@ -158,7 +158,7 @@ fn should_not_remove_sign_if_password_is_invalid() {

 // then
 assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);
 }

 #[test]
@@ -167,7 +167,7 @@ fn should_confirm_transaction_and_dispatch() {
 let tester = signer_tester();
 let address = tester.accounts.new_account("test").unwrap();
 let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
-tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
+tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest {
 from: address,
 to: Some(recipient),
 gas_price: U256::from(10_000),
@@ -189,7 +189,7 @@ fn should_confirm_transaction_and_dispatch() {
 let signature = tester.accounts.sign(address, t.hash()).unwrap();
 let t = t.with_signature(signature);

-assert_eq!(tester.queue.requests().len(), 1);
+assert_eq!(tester.signer.requests().len(), 1);

 // when
 let request = r#"{
@@ -202,7 +202,24 @@ fn should_confirm_transaction_and_dispatch() {

 // then
 assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
-assert_eq!(tester.queue.requests().len(), 0);
+assert_eq!(tester.signer.requests().len(), 0);
 assert_eq!(tester.miner.imported_transactions.lock().len(), 1);
 }

+#[test]
+fn should_generate_new_token() {
+// given
+let tester = signer_tester();
+
+// when
+let request = r#"{
+"jsonrpc":"2.0",
+"method":"personal_generateAuthorizationToken",
+"params":[],
+"id":1
+}"#;
+let response = r#"{"jsonrpc":"2.0","result":"new_token","id":1}"#;
+
+// then
+assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
+}
@@ -18,186 +18,185 @@
 use std::sync::Arc;
 use jsonrpc_core::*;

-/// Eth rpc interface.
-pub trait Eth: Sized + Send + Sync + 'static {
-/// Returns protocol version.
-fn protocol_version(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns an object with data about the sync status or false. (wtf?)
-fn syncing(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the number of hashes per second that the node is mining with.
-fn hashrate(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns block author.
-fn author(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns true if client is actively mining new blocks.
-fn is_mining(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns current gas_price.
-fn gas_price(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns accounts list.
-fn accounts(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns highest block number.
-fn block_number(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns balance of the given account.
-fn balance(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns content of the storage at given address.
-fn storage_at(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns block with given hash.
-fn block_by_hash(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns block with given number.
-fn block_by_number(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the number of transactions sent from given address at given time (block number).
-fn transaction_count(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the number of transactions in a block with given hash.
-fn block_transaction_count_by_hash(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the number of transactions in a block with given block number.
-fn block_transaction_count_by_number(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the number of uncles in a block with given hash.
-fn block_uncles_count_by_hash(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the number of uncles in a block with given block number.
-fn block_uncles_count_by_number(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the code at given address at given time (block number).
-fn code_at(&self, _: Params) -> Result<Value, Error>;
-
-/// Sends signed transaction.
-fn send_raw_transaction(&self, _: Params) -> Result<Value, Error>;
-
-/// Call contract.
-fn call(&self, _: Params) -> Result<Value, Error>;
-
-/// Estimate gas needed for execution of given contract.
-fn estimate_gas(&self, _: Params) -> Result<Value, Error>;
-
-/// Get transaction by its hash.
-fn transaction_by_hash(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns transaction at given block hash and index.
-fn transaction_by_block_hash_and_index(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns transaction by given block number and index.
-fn transaction_by_block_number_and_index(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns transaction receipt.
-fn transaction_receipt(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns an uncles at given block and index.
-fn uncle_by_block_hash_and_index(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns an uncles at given block and index.
-fn uncle_by_block_number_and_index(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns available compilers.
-fn compilers(&self, _: Params) -> Result<Value, Error>;
-
-/// Compiles lll code.
-fn compile_lll(&self, _: Params) -> Result<Value, Error>;
-
-/// Compiles solidity.
-fn compile_solidity(&self, _: Params) -> Result<Value, Error>;
-
-/// Compiles serpent.
-fn compile_serpent(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns logs matching given filter object.
-fn logs(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns the hash of the current block, the seedHash, and the boundary condition to be met.
-fn work(&self, _: Params) -> Result<Value, Error>;
-
-/// Used for submitting a proof-of-work solution.
-fn submit_work(&self, _: Params) -> Result<Value, Error>;
-
-/// Used for submitting mining hashrate.
-fn submit_hashrate(&self, _: Params) -> Result<Value, Error>;
-
-/// Should be used to convert object to io delegate.
-fn to_delegate(self) -> IoDelegate<Self> {
-let mut delegate = IoDelegate::new(Arc::new(self));
-delegate.add_method("eth_protocolVersion", Eth::protocol_version);
-delegate.add_method("eth_syncing", Eth::syncing);
-delegate.add_method("eth_hashrate", Eth::hashrate);
-delegate.add_method("eth_coinbase", Eth::author);
-delegate.add_method("eth_mining", Eth::is_mining);
-delegate.add_method("eth_gasPrice", Eth::gas_price);
-delegate.add_method("eth_accounts", Eth::accounts);
-delegate.add_method("eth_blockNumber", Eth::block_number);
-delegate.add_method("eth_getBalance", Eth::balance);
-delegate.add_method("eth_getStorageAt", Eth::storage_at);
-delegate.add_method("eth_getTransactionCount", Eth::transaction_count);
-delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash);
-delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number);
-delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash);
-delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number);
-delegate.add_method("eth_getCode", Eth::code_at);
-delegate.add_method("eth_sendRawTransaction", Eth::send_raw_transaction);
-delegate.add_method("eth_call", Eth::call);
-delegate.add_method("eth_estimateGas", Eth::estimate_gas);
-delegate.add_method("eth_getBlockByHash", Eth::block_by_hash);
-delegate.add_method("eth_getBlockByNumber", Eth::block_by_number);
-delegate.add_method("eth_getTransactionByHash", Eth::transaction_by_hash);
-delegate.add_method("eth_getTransactionByBlockHashAndIndex", Eth::transaction_by_block_hash_and_index);
-delegate.add_method("eth_getTransactionByBlockNumberAndIndex", Eth::transaction_by_block_number_and_index);
-delegate.add_method("eth_getTransactionReceipt", Eth::transaction_receipt);
-delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_by_block_hash_and_index);
-delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_by_block_number_and_index);
-delegate.add_method("eth_getCompilers", Eth::compilers);
-delegate.add_method("eth_compileLLL", Eth::compile_lll);
-delegate.add_method("eth_compileSolidity", Eth::compile_solidity);
-delegate.add_method("eth_compileSerpent", Eth::compile_serpent);
-delegate.add_method("eth_getLogs", Eth::logs);
-delegate.add_method("eth_getWork", Eth::work);
-delegate.add_method("eth_submitWork", Eth::submit_work);
-delegate.add_method("eth_submitHashrate", Eth::submit_hashrate);
-delegate
-}
-}
-
-/// Eth filters rpc api (polling).
-// TODO: do filters api properly
-pub trait EthFilter: Sized + Send + Sync + 'static {
-/// Returns id of new filter.
-fn new_filter(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns id of new block filter.
-fn new_block_filter(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns id of new block filter.
-fn new_pending_transaction_filter(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns filter changes since last poll.
-fn filter_changes(&self, _: Params) -> Result<Value, Error>;
-
-/// Returns all logs matching given filter (in a range 'from' - 'to').
-fn filter_logs(&self, _: Params) -> Result<Value, Error>;
-
-/// Uninstalls filter.
-fn uninstall_filter(&self, _: Params) -> Result<Value, Error>;
-
-/// Should be used to convert object to io delegate.
-fn to_delegate(self) -> IoDelegate<Self> {
-let mut delegate = IoDelegate::new(Arc::new(self));
-delegate.add_method("eth_newFilter", EthFilter::new_filter);
-delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter);
-delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter);
-delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes);
-delegate.add_method("eth_getFilterLogs", EthFilter::filter_logs);
-delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter);
-delegate
-}
-}
+use v1::types::{Block, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index};
+use v1::types::{Log, Receipt, SyncStatus, Transaction, Work};
+use v1::types::{H64, H160, H256, U256};
+
+use v1::helpers::auto_args::{Trailing, Wrap};
+
+build_rpc_trait! {
+/// Eth rpc interface.
+pub trait Eth {
+/// Returns protocol version encoded as a string (quotes are necessary).
+#[name("eth_protocolVersion")]
+fn protocol_version(&self) -> Result<String, Error>;
+
+/// Returns an object with data about the sync status or false. (wtf?)
+#[name("eth_syncing")]
+fn syncing(&self) -> Result<SyncStatus, Error>;
+
+/// Returns the number of hashes per second that the node is mining with.
+#[name("eth_hashrate")]
+fn hashrate(&self) -> Result<U256, Error>;
+
+/// Returns block author.
+#[name("eth_coinbase")]
+fn author(&self) -> Result<H160, Error>;
+
+/// Returns true if client is actively mining new blocks.
+#[name("eth_mining")]
+fn is_mining(&self) -> Result<bool, Error>;
+
+/// Returns current gas_price.
+#[name("eth_gasPrice")]
+fn gas_price(&self) -> Result<U256, Error>;
+
+/// Returns accounts list.
+#[name("eth_accounts")]
+fn accounts(&self) -> Result<Vec<H160>, Error>;
+
+/// Returns highest block number.
+#[name("eth_blockNumber")]
+fn block_number(&self) -> Result<U256, Error>;
+
+/// Returns balance of the given account.
+#[name("eth_getBalance")]
+fn balance(&self, H160, Trailing<BlockNumber>) -> Result<U256, Error>;
+
+/// Returns content of the storage at given address.
+#[name("eth_getStorageAt")]
+fn storage_at(&self, H160, U256, Trailing<BlockNumber>) -> Result<H256, Error>;
+
+/// Returns block with given hash.
+#[name("eth_getBlockByHash")]
+fn block_by_hash(&self, H256, bool) -> Result<Option<Block>, Error>;
+
+/// Returns block with given number.
+#[name("eth_getBlockByNumber")]
+fn block_by_number(&self, BlockNumber, bool) -> Result<Option<Block>, Error>;
+
+/// Returns the number of transactions sent from given address at given time (block number).
+#[name("eth_getTransactionCount")]
+fn transaction_count(&self, H160, Trailing<BlockNumber>) -> Result<U256, Error>;
+
+/// Returns the number of transactions in a block with given hash.
+#[name("eth_getBlockTransactionCountByHash")]
+fn block_transaction_count_by_hash(&self, H256) -> Result<Option<U256>, Error>;
+
+/// Returns the number of transactions in a block with given block number.
+#[name("eth_getBlockTransactionCountByNumber")]
+fn block_transaction_count_by_number(&self, BlockNumber) -> Result<Option<U256>, Error>;
+
+/// Returns the number of uncles in a block with given hash.
+#[name("eth_getUncleCountByBlockHash")]
+fn block_uncles_count_by_hash(&self, H256) -> Result<Option<U256>, Error>;
+
+/// Returns the number of uncles in a block with given block number.
+#[name("eth_getUncleCountByBlockNumber")]
+fn block_uncles_count_by_number(&self, BlockNumber) -> Result<Option<U256>, Error>;
+
+/// Returns the code at given address at given time (block number).
+#[name("eth_getCode")]
+fn code_at(&self, H160, Trailing<BlockNumber>) -> Result<Bytes, Error>;
+
+/// Sends signed transaction, returning its hash.
+#[name("eth_sendRawTransaction")]
+fn send_raw_transaction(&self, Bytes) -> Result<H256, Error>;
+
+/// Call contract, returning the output data.
+#[name("eth_call")]
+fn call(&self, CallRequest, Trailing<BlockNumber>) -> Result<Bytes, Error>;
+
+/// Estimate gas needed for execution of given contract.
+#[name("eth_estimateGas")]
+fn estimate_gas(&self, CallRequest, Trailing<BlockNumber>) -> Result<U256, Error>;
+
+/// Get transaction by its hash.
+#[name("eth_getTransactionByHash")]
+fn transaction_by_hash(&self, H256) -> Result<Option<Transaction>, Error>;
+
+/// Returns transaction at given block hash and index.
+#[name("eth_getTransactionByBlockHashAndIndex")]
+fn transaction_by_block_hash_and_index(&self, H256, Index) -> Result<Option<Transaction>, Error>;
+
+/// Returns transaction by given block number and index.
+#[name("eth_getTransactionByBlockNumberAndIndex")]
+fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> Result<Option<Transaction>, Error>;
+
+/// Returns transaction receipt.
+#[name("eth_getTransactionReceipt")]
+fn transaction_receipt(&self, H256) -> Result<Option<Receipt>, Error>;
+
+/// Returns an uncles at given block and index.
+#[name("eth_getUncleByBlockHashAndIndex")]
+fn uncle_by_block_hash_and_index(&self, H256, Index) -> Result<Option<Block>, Error>;
+
+/// Returns an uncles at given block and index.
+#[name("eth_getUncleByBlockNumberAndIndex")]
+fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> Result<Option<Block>, Error>;
+
+/// Returns available compilers.
+#[name("eth_getCompilers")]
+fn compilers(&self) -> Result<Vec<String>, Error>;
+
+/// Compiles lll code.
+#[name("eth_compileLLL")]
+fn compile_lll(&self, String) -> Result<Bytes, Error>;
+
+/// Compiles solidity.
+#[name("eth_compileSolidity")]
+fn compile_solidity(&self, String) -> Result<Bytes, Error>;
+
+/// Compiles serpent.
+#[name("eth_compileSerpent")]
+fn compile_serpent(&self, String) -> Result<Bytes, Error>;
+
+/// Returns logs matching given filter object.
+#[name("eth_getLogs")]
+fn logs(&self, Filter) -> Result<Vec<Log>, Error>;
+
+/// Returns the hash of the current block, the seedHash, and the boundary condition to be met.
+#[name("eth_getWork")]
+fn work(&self, Trailing<u64>) -> Result<Work, Error>;
+
+/// Used for submitting a proof-of-work solution.
+#[name("eth_submitWork")]
+fn submit_work(&self, H64, H256, H256) -> Result<bool, Error>;
+
+/// Used for submitting mining hashrate.
+#[name("eth_submitHashrate")]
+fn submit_hashrate(&self, U256, H256) -> Result<bool, Error>;
+}
+}
+
+build_rpc_trait! {
+/// Eth filters rpc api (polling).
+// TODO: do filters api properly
+pub trait EthFilter {
+/// Returns id of new filter.
+#[name("eth_newFilter")]
+fn new_filter(&self, Filter) -> Result<U256, Error>;
+
+/// Returns id of new block filter.
+#[name("eth_newBlockFilter")]
+fn new_block_filter(&self) -> Result<U256, Error>;
+
+/// Returns id of new block filter.
+#[name("eth_newPendingTransactionFilter")]
+fn new_pending_transaction_filter(&self) -> Result<U256, Error>;
+
+/// Returns filter changes since last poll.
+#[name("eth_getFilterChanges")]
+fn filter_changes(&self, Index) -> Result<FilterChanges, Error>;
+
+/// Returns all logs matching given filter (in a range 'from' - 'to').
+#[name("eth_getFilterLogs")]
+fn filter_logs(&self, Index) -> Result<Vec<Log>, Error>;
+
+/// Uninstalls filter.
+#[name("eth_uninstallFilter")]
+fn uninstall_filter(&self, Index) -> Result<bool, Error>;
+}
+}
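The hunk above replaces `Params`-parsing method bodies with typed signatures that `build_rpc_trait!` turns into dispatch glue. A rough, self-contained sketch of the idea follows (illustrative names only; this is not the macro's actual expansion):

// Illustrative sketch, not Parity code: decode positional JSON-RPC params
// into typed arguments before calling a plain Rust method, which is the
// boilerplate the macro generates from the signatures above.
use serde_json::Value;

// Hypothetical typed handler standing in for e.g. `Eth::balance`.
fn balance(address: &str, block: Option<&str>) -> Result<u64, String> {
    let _ = (address, block);
    Ok(0)
}

// The generated glue does roughly this: parse `[address, optional block]`
// and forward the typed values, instead of every method taking raw `Params`.
fn dispatch_get_balance(params: Value) -> Result<Value, String> {
    let args = params.as_array().ok_or("expected an array of params")?;
    let address = args.get(0).and_then(Value::as_str).ok_or("missing address")?;
    let block = args.get(1).and_then(Value::as_str); // trailing, may be absent
    balance(address, block).map(|wei| Value::String(format!("{:#x}", wei)))
}

fn main() {
    let params: Value = serde_json::json!(["0x0000000000000000000000000000000000000000", "latest"]);
    println!("{:?}", dispatch_get_balance(params));
}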
@@ -227,6 +226,10 @@ pub trait EthSigning: Sized + Send + Sync + 'static {
 /// or an error.
 fn check_request(&self, _: Params) -> Result<Value, Error>;

+/// Decrypt some ECIES-encrypted message.
+/// First parameter is the address with which it is encrypted, second is the ciphertext.
+fn decrypt_message(&self, _: Params) -> Result<Value, Error>;
+
 /// Should be used to convert object to io delegate.
 fn to_delegate(self) -> IoDelegate<Self> {
 let mut delegate = IoDelegate::new(Arc::new(self));
@@ -235,6 +238,7 @@ pub trait EthSigning: Sized + Send + Sync + 'static {
 delegate.add_method("eth_postSign", EthSigning::post_sign);
 delegate.add_method("eth_postTransaction", EthSigning::post_transaction);
 delegate.add_method("eth_checkRequest", EthSigning::check_request);
+delegate.add_method("ethcore_decryptMessage", EthSigning::decrypt_message);
 delegate
 }
 }
@@ -76,6 +76,13 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
 /// Returns the value of the registrar for this network.
 fn registry_address(&self, _: Params) -> Result<Value, Error>;

+/// Encrypt some data with a public key under ECIES.
+/// First parameter is the 512-byte destination public key, second is the message.
+fn encrypt_message(&self, _: Params) -> Result<Value, Error>;
+
+/// Returns all pending (current) transactions from transaction queue.
+fn pending_transactions(&self, _: Params) -> Result<Value, Error>;
+
 /// Should be used to convert object to io delegate.
 fn to_delegate(self) -> IoDelegate<Self> {
 let mut delegate = IoDelegate::new(Arc::new(self));
@@ -98,7 +105,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
 delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase);
 delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address);
 delegate.add_method("ethcore_registryAddress", Ethcore::registry_address);
+delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message);
+delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions);
 delegate
 }
 }
@@ -92,12 +92,16 @@ pub trait PersonalSigner: Sized + Send + Sync + 'static {
 /// Reject the confirmation request.
 fn reject_request(&self, _: Params) -> Result<Value, Error>;

+/// Generates new authorization token.
+fn generate_token(&self, _: Params) -> Result<Value, Error>;
+
 /// Should be used to convert object to io delegate.
 fn to_delegate(self) -> IoDelegate<Self> {
 let mut delegate = IoDelegate::new(Arc::new(self));
 delegate.add_method("personal_requestsToConfirm", PersonalSigner::requests_to_confirm);
 delegate.add_method("personal_confirmRequest", PersonalSigner::confirm_request);
 delegate.add_method("personal_rejectRequest", PersonalSigner::reject_request);
+delegate.add_method("personal_generateAuthorizationToken", PersonalSigner::generate_token);
 delegate
 }
 }
@@ -103,7 +103,7 @@ mod tests {
 fn test_serialize_block_transactions() {
 let t = BlockTransactions::Full(vec![Transaction::default()]);
 let serialized = serde_json::to_string(&t).unwrap();
-assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x"}]"#);
+assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null}]"#);

 let t = BlockTransactions::Hashes(vec![H256::default().into()]);
 let serialized = serde_json::to_string(&t).unwrap();
@@ -31,6 +31,12 @@ pub enum BlockNumber {
 Pending,
 }

+impl Default for BlockNumber {
+fn default() -> Self {
+BlockNumber::Latest
+}
+}
+
 impl Deserialize for BlockNumber {
 fn deserialize<D>(deserializer: &mut D) -> Result<BlockNumber, D::Error>
 where D: Deserializer {
@@ -70,10 +70,16 @@ impl Visitor for BytesVisitor {
 type Value = Bytes;

 fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
-if value.len() >= 2 && &value[0..2] == "0x" {
-Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![])))
+if value.is_empty() {
+warn!(
+target: "deprecated",
+"Deserializing empty string as empty bytes. This is a non-standard behaviour that will be removed in future versions. Please update your code to send `0x` instead!"
+);
+Ok(Bytes::new(Vec::new()))
+} else if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 {
+Ok(Bytes::new(try!(FromHex::from_hex(&value[2..]).map_err(|_| Error::custom("invalid hex")))))
 } else {
-Err(Error::custom("invalid hex"))
+Err(Error::custom("invalid format"))
 }
 }

@@ -95,5 +101,31 @@ mod tests {
 let serialized = serde_json::to_string(&bytes).unwrap();
 assert_eq!(serialized, r#""0x0123456789abcdef""#);
 }

+#[test]
+fn test_bytes_deserialize() {
+// TODO [ToDr] Uncomment when Mist starts sending correct data
+// let bytes1: Result<Bytes, serde_json::Error> = serde_json::from_str(r#""""#);
+let bytes2: Result<Bytes, serde_json::Error> = serde_json::from_str(r#""0x123""#);
+let bytes3: Result<Bytes, serde_json::Error> = serde_json::from_str(r#""0xgg""#);
+
+let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap();
+let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap();
+let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap();
+
+// assert!(bytes1.is_err());
+assert!(bytes2.is_err());
+assert!(bytes3.is_err());
+assert_eq!(bytes4, Bytes(vec![]));
+assert_eq!(bytes5, Bytes(vec![0x12]));
+assert_eq!(bytes6, Bytes(vec![0x1, 0x23]));
+}
+
+// TODO [ToDr] Remove when Mist starts sending correct data
+#[test]
+fn test_bytes_lenient_against_the_spec_deserialize_for_empty_string_for_mist_compatibility() {
+let deserialized: Bytes = serde_json::from_str(r#""""#).unwrap();
+assert_eq!(deserialized, Bytes(Vec::new()));
+}
 }
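The new `Bytes` visitor above accepts three shapes of input. A standalone sketch of the same rules (the `parse_bytes` and `hex_val` helpers are illustrative, not crate code):

// Sketch: empty string -> empty bytes (non-standard, kept for Mist compatibility),
// "0x"-prefixed even-length hex -> decoded bytes, anything else -> error.
fn parse_bytes(value: &str) -> Result<Vec<u8>, &'static str> {
    fn hex_val(c: u8) -> Option<u8> {
        match c {
            b'0'..=b'9' => Some(c - b'0'),
            b'a'..=b'f' => Some(c - b'a' + 10),
            b'A'..=b'F' => Some(c - b'A' + 10),
            _ => None,
        }
    }
    if value.is_empty() {
        return Ok(Vec::new()); // deprecated lenient path
    }
    if value.len() >= 2 && &value[0..2] == "0x" && value.len() % 2 == 0 {
        value[2..]
            .as_bytes()
            .chunks(2)
            .map(|pair| Some(hex_val(pair[0])? << 4 | hex_val(pair[1])?))
            .collect::<Option<Vec<u8>>>()
            .ok_or("invalid hex")
    } else {
        Err("invalid format")
    }
}

fn main() {
    assert_eq!(parse_bytes(""), Ok(vec![]));
    assert_eq!(parse_bytes("0x"), Ok(vec![]));
    assert_eq!(parse_bytes("0x0123"), Ok(vec![0x01, 0x23]));
    assert!(parse_bytes("0x123").is_err()); // odd length
    assert!(parse_bytes("0xgg").is_err());  // not hex
}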
@@ -14,12 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use serde::{Deserialize, Deserializer, Error};
+use serde::{Deserialize, Deserializer, Serialize, Serializer, Error};
 use serde_json::value;
 use jsonrpc_core::Value;
 use ethcore::filter::Filter as EthFilter;
 use ethcore::client::BlockID;
-use v1::types::{BlockNumber, H160, H256};
+use v1::types::{BlockNumber, H160, H256, Log};

 /// Variadic value
 #[derive(Debug, PartialEq, Clone)]
@@ -66,6 +66,8 @@ pub struct Filter {
 pub address: Option<FilterAddress>,
 /// Topics
 pub topics: Option<Vec<Topic>>,
+/// Limit
+pub limit: Option<usize>,
 }

 impl Into<EthFilter> for Filter {
@@ -85,7 +87,29 @@ impl Into<EthFilter> for Filter {
 VariadicValue::Multiple(t) => Some(t.into_iter().map(Into::into).collect())
 }).filter_map(|m| m).collect()).into_iter();
 vec![iter.next(), iter.next(), iter.next(), iter.next()]
-}
+},
+limit: self.limit,
+}
+}
+}
+
+/// Results of the filter_changes RPC.
+#[derive(Debug, PartialEq)]
+pub enum FilterChanges {
+/// New logs.
+Logs(Vec<Log>),
+/// New hashes (block or transactions)
+Hashes(Vec<H256>),
+/// Empty result,
+Empty,
+}
+
+impl Serialize for FilterChanges {
+fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error> where S: Serializer {
+match *self {
+FilterChanges::Logs(ref logs) => logs.serialize(s),
+FilterChanges::Hashes(ref hashes) => hashes.serialize(s),
+FilterChanges::Empty => (&[] as &[Value]).serialize(s),
 }
 }
 }
@@ -120,7 +144,8 @@ mod tests {
 from_block: Some(BlockNumber::Earliest),
 to_block: Some(BlockNumber::Latest),
 address: None,
-topics: None
+topics: None,
+limit: None,
 });
 }
 }
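The `FilterChanges` enum above serializes transparently as whichever payload it wraps, with `Empty` becoming an empty JSON array. A standalone sketch with stand-in element types, written against current serde rather than the crate's serde version:

use serde::{Serialize, Serializer};

#[derive(Debug)]
enum FilterChanges {
    Logs(Vec<String>),   // stand-in for Vec<Log>
    Hashes(Vec<String>), // stand-in for Vec<H256>
    Empty,
}

impl Serialize for FilterChanges {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        match *self {
            FilterChanges::Logs(ref logs) => logs.serialize(s),     // no enum wrapper
            FilterChanges::Hashes(ref hashes) => hashes.serialize(s),
            FilterChanges::Empty => (&[] as &[u8]).serialize(s),    // "[]"
        }
    }
}

fn main() {
    let logs = FilterChanges::Logs(vec!["log entry".into()]);
    assert_eq!(serde_json::to_string(&logs).unwrap(), r#"["log entry"]"#);
    let hashes = FilterChanges::Hashes(vec!["0x01".into()]);
    assert_eq!(serde_json::to_string(&hashes).unwrap(), r#"["0x01"]"#);
    assert_eq!(serde_json::to_string(&FilterChanges::Empty).unwrap(), "[]");
}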
@@ -20,7 +20,7 @@ use std::cmp::Ordering;
 use std::hash::{Hash, Hasher};
 use serde;
 use rustc_serialize::hex::{ToHex, FromHex};
-use util::{H64 as Eth64, H256 as EthH256, H520 as EthH520, H2048 as Eth2048, H160 as Eth160};
+use util::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as Eth512, H2048 as Eth2048};

 macro_rules! impl_hash {
 ($name: ident, $other: ident, $size: expr) => {
@@ -144,6 +144,7 @@ macro_rules! impl_hash {

 impl_hash!(H64, Eth64, 8);
 impl_hash!(H160, Eth160, 20);
-impl_hash!(H256, EthH256, 32);
-impl_hash!(H520, EthH520, 65);
+impl_hash!(H256, Eth256, 32);
+impl_hash!(H512, Eth512, 64);
+impl_hash!(H520, Eth520, 65);
 impl_hash!(H2048, Eth2048, 256);
@ -30,14 +30,15 @@ mod receipt;
|
|||||||
mod trace;
|
mod trace;
|
||||||
mod trace_filter;
|
mod trace_filter;
|
||||||
mod uint;
|
mod uint;
|
||||||
|
mod work;
|
||||||
|
|
||||||
pub use self::bytes::Bytes;
|
pub use self::bytes::Bytes;
|
||||||
pub use self::block::{Block, BlockTransactions};
|
pub use self::block::{Block, BlockTransactions};
|
||||||
pub use self::block_number::BlockNumber;
|
pub use self::block_number::BlockNumber;
|
||||||
pub use self::call_request::CallRequest;
|
pub use self::call_request::CallRequest;
|
||||||
pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, TransactionModification};
|
pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, TransactionModification};
|
||||||
pub use self::filter::Filter;
|
pub use self::filter::{Filter, FilterChanges};
|
||||||
pub use self::hash::{H64, H160, H256, H520, H2048};
|
pub use self::hash::{H64, H160, H256, H512, H520, H2048};
|
||||||
pub use self::index::Index;
|
pub use self::index::Index;
|
||||||
pub use self::log::Log;
|
pub use self::log::Log;
|
||||||
pub use self::sync::{SyncStatus, SyncInfo, Peers};
|
pub use self::sync::{SyncStatus, SyncInfo, Peers};
|
||||||
@ -47,3 +48,4 @@ pub use self::receipt::Receipt;
|
|||||||
pub use self::trace::{LocalizedTrace, TraceResults};
|
pub use self::trace::{LocalizedTrace, TraceResults};
|
||||||
pub use self::trace_filter::TraceFilter;
|
pub use self::trace_filter::TraceFilter;
|
||||||
pub use self::uint::U256;
|
pub use self::uint::U256;
|
||||||
|
pub use self::work::Work;
|
||||||
|
@@ -16,7 +16,7 @@

 use ethcore::contract_address;
 use ethcore::transaction::{LocalizedTransaction, Action, SignedTransaction};
-use v1::types::{Bytes, H160, H256, U256};
+use v1::types::{Bytes, H160, H256, U256, H512};

 /// Transaction
 #[derive(Debug, Default, Serialize)]
@@ -51,6 +51,9 @@ pub struct Transaction {
 pub creates: Option<H160>,
 /// Raw transaction data
 pub raw: Bytes,
+/// Public key of the signer.
+#[serde(rename="publicKey")]
+pub public_key: Option<H512>,
 }

 impl From<LocalizedTransaction> for Transaction {
@@ -75,6 +78,7 @@ impl From<LocalizedTransaction> for Transaction {
 Action::Call(_) => None,
 },
 raw: ::rlp::encode(&t.signed).to_vec().into(),
+public_key: t.public_key().ok().map(Into::into),
 }
 }
 }
@@ -101,6 +105,7 @@ impl From<SignedTransaction> for Transaction {
 Action::Call(_) => None,
 },
 raw: ::rlp::encode(&t).to_vec().into(),
+public_key: t.public_key().ok().map(Into::into),
 }
 }
 }
@@ -114,7 +119,7 @@ mod tests {
 fn test_transaction_serialize() {
 let t = Transaction::default();
 let serialized = serde_json::to_string(&t).unwrap();
-assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x"}"#);
+assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null}"#);
 }
 }
@@ -77,6 +77,10 @@ macro_rules! impl_uint {
 return Err(serde::Error::custom("Invalid length."));
 }

+if &value[0..2] != "0x" {
+return Err(serde::Error::custom("Use hex encoded numbers with 0x prefix."))
+}
+
 $other::from_str(&value[2..]).map($name).map_err(|_| serde::Error::custom("Invalid hex value."))
 }

@@ -100,6 +104,8 @@ mod tests {
 use super::U256;
 use serde_json;

+type Res = Result<U256, serde_json::Error>;
+
 #[test]
 fn should_serialize_u256() {
 let serialized1 = serde_json::to_string(&U256(0.into())).unwrap();
@@ -113,6 +119,21 @@ mod tests {
 assert_eq!(serialized4, r#""0x100""#);
 }

+#[test]
+fn should_fail_to_deserialize_decimals() {
+let deserialized1: Res = serde_json::from_str(r#""""#);
+let deserialized2: Res = serde_json::from_str(r#""0""#);
+let deserialized3: Res = serde_json::from_str(r#""10""#);
+let deserialized4: Res = serde_json::from_str(r#""1000000""#);
+let deserialized5: Res = serde_json::from_str(r#""1000000000000000000""#);
+
+assert!(deserialized1.is_err());
+assert!(deserialized2.is_err());
+assert!(deserialized3.is_err());
+assert!(deserialized4.is_err());
+assert!(deserialized5.is_err());
+}
+
 #[test]
 fn should_deserialize_u256() {
 let deserialized1: U256 = serde_json::from_str(r#""0x""#).unwrap();
rpc/src/v1/types/work.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use super::{H256, U256};
+
+use serde::{Serialize, Serializer};
+
+/// The result of an `eth_getWork` call: it differs based on an option
+/// whether to send the block number.
+#[derive(Debug, PartialEq, Eq)]
+pub struct Work {
+/// The proof-of-work hash.
+pub pow_hash: H256,
+/// The seed hash.
+pub seed_hash: H256,
+/// The target.
+pub target: H256,
+/// The block number: this isn't always stored.
+pub number: Option<u64>,
+}
+
+impl Serialize for Work {
+fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error> where S: Serializer {
+match self.number.as_ref() {
+Some(num) => (&self.pow_hash, &self.seed_hash, &self.target, U256::from(*num)).serialize(s),
+None => (&self.pow_hash, &self.seed_hash, &self.target).serialize(s),
+}
+}
+}
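The `Work` type above serializes as a bare JSON array whose length depends on whether the block number is included. A standalone sketch with plain strings in place of `H256`/`U256`, written against current serde:

use serde::{Serialize, Serializer};

struct Work {
    pow_hash: String,
    seed_hash: String,
    target: String,
    number: Option<u64>,
}

impl Serialize for Work {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        // A tuple serializes as a JSON array; the block number is appended
        // only when present, hex-encoded like the crate's U256.
        match self.number {
            Some(num) => (&self.pow_hash, &self.seed_hash, &self.target, format!("{:#x}", num)).serialize(s),
            None => (&self.pow_hash, &self.seed_hash, &self.target).serialize(s),
        }
    }
}

fn main() {
    let work = Work { pow_hash: "0xaa".into(), seed_hash: "0xbb".into(), target: "0xcc".into(), number: None };
    assert_eq!(serde_json::to_string(&work).unwrap(), r#"["0xaa","0xbb","0xcc"]"#);

    let work = Work { number: Some(0x2a), ..work };
    assert_eq!(serde_json::to_string(&work).unwrap(), r#"["0xaa","0xbb","0xcc","0x2a"]"#);
}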
@ -48,6 +48,7 @@ impl TimeProvider for DefaultTimeProvider {
|
|||||||
/// No of seconds the hash is valid
|
/// No of seconds the hash is valid
|
||||||
const TIME_THRESHOLD: u64 = 7;
|
const TIME_THRESHOLD: u64 = 7;
|
||||||
const TOKEN_LENGTH: usize = 16;
|
const TOKEN_LENGTH: usize = 16;
|
||||||
|
const INITIAL_TOKEN: &'static str = "initial";
|
||||||
|
|
||||||
/// Manages authorization codes for `SignerUIs`
|
/// Manages authorization codes for `SignerUIs`
|
||||||
pub struct AuthCodes<T: TimeProvider = DefaultTimeProvider> {
|
pub struct AuthCodes<T: TimeProvider = DefaultTimeProvider> {
|
||||||
@ -98,7 +99,7 @@ impl<T: TimeProvider> AuthCodes<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Checks if given hash is correct identifier of `SignerUI`
|
/// Checks if given hash is correct identifier of `SignerUI`
|
||||||
pub fn is_valid(&self, hash: &H256, time: u64) -> bool {
|
pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool {
|
||||||
let now = self.now.now();
|
let now = self.now.now();
|
||||||
// check time
|
// check time
|
||||||
if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD {
|
if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD {
|
||||||
@ -106,9 +107,21 @@ impl<T: TimeProvider> AuthCodes<T> {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let as_token = |code| format!("{}:{}", code, time).sha3();
|
||||||
|
|
||||||
|
// Check if it's the initial token.
|
||||||
|
if self.is_empty() {
|
||||||
|
let initial = &as_token(INITIAL_TOKEN) == hash;
|
||||||
|
// Initial token can be used only once.
|
||||||
|
if initial {
|
||||||
|
let _ = self.generate_new();
|
||||||
|
}
|
||||||
|
return initial;
|
||||||
|
}
|
||||||
|
|
||||||
// look for code
|
// look for code
|
||||||
self.codes.iter()
|
self.codes.iter()
|
||||||
.any(|code| &format!("{}:{}", code, time).sha3() == hash)
|
.any(|code| &as_token(code) == hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Generates and returns a new code that can be used by `SignerUIs`
|
/// Generates and returns a new code that can be used by `SignerUIs`
|
||||||
@ -124,6 +137,11 @@ impl<T: TimeProvider> AuthCodes<T> {
|
|||||||
self.codes.push(code);
|
self.codes.push(code);
|
||||||
Ok(readable_code)
|
Ok(readable_code)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns true if there are no tokens in this store
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.codes.is_empty()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -137,12 +155,28 @@ mod tests {
		format!("{}:{}", val, time).sha3()
	}

+	#[test]
+	fn should_return_true_if_code_is_initial_and_store_is_empty() {
+		// given
+		let code = "initial";
+		let time = 99;
+		let mut codes = AuthCodes::new(vec![], || 100);
+
+		// when
+		let res1 = codes.is_valid(&generate_hash(code, time), time);
+		let res2 = codes.is_valid(&generate_hash(code, time), time);
+
+		// then
+		assert_eq!(res1, true);
+		assert_eq!(res2, false);
+	}
+
	#[test]
	fn should_return_true_if_hash_is_valid() {
		// given
		let code = "23521352asdfasdfadf";
		let time = 99;
-		let codes = AuthCodes::new(vec![code.into()], || 100);
+		let mut codes = AuthCodes::new(vec![code.into()], || 100);

		// when
		let res = codes.is_valid(&generate_hash(code, time), time);
@@ -156,7 +190,7 @@ mod tests {
		// given
		let code = "23521352asdfasdfadf";
		let time = 99;
-		let codes = AuthCodes::new(vec!["1".into()], || 100);
+		let mut codes = AuthCodes::new(vec!["1".into()], || 100);

		// when
		let res = codes.is_valid(&generate_hash(code, time), time);
@@ -171,7 +205,7 @@ mod tests {
		let code = "23521352asdfasdfadf";
		let time = 107;
		let time2 = 93;
-		let codes = AuthCodes::new(vec![code.into()], || 100);
+		let mut codes = AuthCodes::new(vec![code.into()], || 100);

		// when
		let res1 = codes.is_valid(&generate_hash(code, time), time);
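Note (not part of the diff): the two timestamps in the expired-token test line up with TIME_THRESHOLD = 7. With now = 100, only timestamps strictly between 93 and 107 are accepted, so 107 and 93 are exactly the first rejected values on either side. A tiny runnable restatement of that window check, assuming now >= TIME_THRESHOLD so the u64 subtraction cannot underflow:

const TIME_THRESHOLD: u64 = 7;

// Mirrors the `time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD`
// rejection in AuthCodes::is_valid; assumes now >= TIME_THRESHOLD.
fn within_threshold(now: u64, time: u64) -> bool {
	!(time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD)
}

fn main() {
	assert!(within_threshold(100, 99));   // the happy-path tests use time = 99
	assert!(!within_threshold(100, 107)); // just too far in the future
	assert!(!within_threshold(100, 93));  // just too far in the past
}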
@@ -14,24 +14,48 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::env;
+use std::ops::{Deref, DerefMut};
use std::thread;
-use std::time::Duration;
+use std::time::{self, Duration};
use std::sync::Arc;
-use devtools::http_client;
+use devtools::{http_client, RandomTempPath};
use rpc::ConfirmationsQueue;
+use util::Hashable;
use rand;

use ServerBuilder;
use Server;
+use AuthCodes;

-pub fn serve() -> Server {
+pub struct GuardedAuthCodes {
+	authcodes: AuthCodes,
+	path: RandomTempPath,
+}
+impl Deref for GuardedAuthCodes {
+	type Target = AuthCodes;
+	fn deref(&self) -> &Self::Target {
+		&self.authcodes
+	}
+}
+impl DerefMut for GuardedAuthCodes {
+	fn deref_mut(&mut self) -> &mut AuthCodes {
+		&mut self.authcodes
+	}
+}
+
+pub fn serve() -> (Server, usize, GuardedAuthCodes) {
+	let mut path = RandomTempPath::new();
+	path.panic_on_drop_failure = false;
	let queue = Arc::new(ConfirmationsQueue::default());
-	let builder = ServerBuilder::new(queue, env::temp_dir());
+	let builder = ServerBuilder::new(queue, path.to_path_buf());
	let port = 35000 + rand::random::<usize>() % 10000;
	let res = builder.start(format!("127.0.0.1:{}", port).parse().unwrap()).unwrap();
	thread::sleep(Duration::from_millis(25));
-	res
+	(res, port, GuardedAuthCodes {
+		authcodes: AuthCodes::from_file(&path).unwrap(),
+		path: path,
+	})
}

pub fn request(server: Server, request: &str) -> http_client::Response {
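Note (not part of the diff): `GuardedAuthCodes` is a small owner-plus-Deref wrapper, so the tests can call `AuthCodes` methods on it directly while the temporary directory it guards stays alive for the whole test. A generic sketch of the same pattern (the `_resource` field is just a stand-in for `RandomTempPath`):

use std::ops::{Deref, DerefMut};

// Illustrative sketch of the guard pattern: own a resource, expose the inner value.
struct Guarded<T> {
	inner: T,
	_resource: String, // stand-in for the temp path kept alive by the guard
}

impl<T> Deref for Guarded<T> {
	type Target = T;
	fn deref(&self) -> &T { &self.inner }
}

impl<T> DerefMut for Guarded<T> {
	fn deref_mut(&mut self) -> &mut T { &mut self.inner }
}

fn main() {
	let mut guarded = Guarded { inner: vec![1, 2, 3], _resource: "/tmp/authcodes".into() };
	guarded.push(4);              // DerefMut: callers use the inner type's API directly
	assert_eq!(guarded.len(), 4); // Deref: same for read-only access
}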
@@ -41,7 +65,7 @@ pub fn request(server: Server, request: &str) -> http_client::Response {
#[test]
fn should_reject_invalid_host() {
	// given
-	let server = serve();
+	let server = serve().0;

	// when
	let response = request(server,
@@ -62,7 +86,7 @@ fn should_reject_invalid_host() {
#[test]
fn should_serve_styles_even_on_disallowed_domain() {
	// given
-	let server = serve();
+	let server = serve().0;

	// when
	let response = request(server,
@@ -79,3 +103,103 @@ fn should_serve_styles_even_on_disallowed_domain() {
	assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned());
}

+#[test]
+fn should_block_if_authorization_is_incorrect() {
+	// given
+	let (server, port, _) = serve();
+
+	// when
+	let response = request(server,
+		&format!("\
+			GET / HTTP/1.1\r\n\
+			Host: 127.0.0.1:{}\r\n\
+			Connection: Upgrade\r\n\
+			Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\
+			Sec-WebSocket-Protocol: wrong\r\n\
+			Sec-WebSocket-Version: 13\r\n\
+			\r\n\
+			{{}}
+		", port)
+	);
+
+	// then
+	assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned());
+}
+
+#[test]
+fn should_allow_if_authorization_is_correct() {
+	// given
+	let (server, port, mut authcodes) = serve();
+	let code = authcodes.generate_new().unwrap().replace("-", "");
+	authcodes.to_file(&authcodes.path).unwrap();
+	let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs();
+
+	// when
+	let response = request(server,
+		&format!("\
+			GET / HTTP/1.1\r\n\
+			Host: 127.0.0.1:{}\r\n\
+			Connection: Close\r\n\
+			Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\
+			Sec-WebSocket-Protocol: {:?}_{}\r\n\
+			Sec-WebSocket-Version: 13\r\n\
+			\r\n\
+			{{}}
+		",
+		port,
+		format!("{}:{}", code, timestamp).sha3(),
+		timestamp,
+		)
+	);
+
+	// then
+	assert_eq!(response.status, "HTTP/1.1 101 Switching Protocols".to_owned());
+}
+
+#[test]
+fn should_allow_initial_connection_but_only_once() {
+	// given
+	let (server, port, authcodes) = serve();
+	let code = "initial";
+	let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs();
+	assert!(authcodes.is_empty());
+
+	// when
+	let response1 = http_client::request(server.addr(),
+		&format!("\
+			GET / HTTP/1.1\r\n\
+			Host: 127.0.0.1:{}\r\n\
+			Connection: Close\r\n\
+			Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\
+			Sec-WebSocket-Protocol:{:?}_{}\r\n\
+			Sec-WebSocket-Version: 13\r\n\
+			\r\n\
+			{{}}
+		",
+		port,
+		format!("{}:{}", code, timestamp).sha3(),
+		timestamp,
+		)
+	);
+	let response2 = http_client::request(server.addr(),
+		&format!("\
+			GET / HTTP/1.1\r\n\
+			Host: 127.0.0.1:{}\r\n\
+			Connection: Close\r\n\
+			Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\
+			Sec-WebSocket-Protocol:{:?}_{}\r\n\
+			Sec-WebSocket-Version: 13\r\n\
+			\r\n\
+			{{}}
+		",
+		port,
+		format!("{}:{}", code, timestamp).sha3(),
+		timestamp,
+		)
+	);
+
+	// then
+	assert_eq!(response1.status, "HTTP/1.1 101 Switching Protocols".to_owned());
+	assert_eq!(response2.status, "HTTP/1.1 403 FORBIDDEN".to_owned());
+}
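Note (not part of the diff): the value these tests put into Sec-WebSocket-Protocol is "<sha3(code:timestamp)>_<timestamp>", where `code` is the authcode with its dashes stripped (the real tests format an H256 with {:?}). A simplified sketch of that client-side derivation, with `pseudo_sha3` standing in for `util::Hashable::sha3()` and the example code entirely made up:

// Illustrative only; `pseudo_sha3` stands in for util::Hashable::sha3().
fn pseudo_sha3(input: &str) -> String {
	format!("h({})", input) // placeholder for the keccak-256 digest
}

fn protocol_value(readable_code: &str, timestamp: u64) -> String {
	let code = readable_code.replace("-", "");
	let hash = pseudo_sha3(&format!("{}:{}", code, timestamp));
	format!("{}_{}", hash, timestamp)
}

fn main() {
	// hypothetical readable code and timestamp, purely for illustration
	let header = protocol_value("1234-5678-9012-3456", 1_500_000_000);
	println!("Sec-WebSocket-Protocol: {}", header);
}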
@@ -180,7 +180,6 @@ impl Drop for Server {
		self.queue.finish();
		self.broadcaster_handle.take().unwrap().join().unwrap();
		self.handle.take().unwrap().join().unwrap();
-
	}
}

@@ -59,7 +59,7 @@ fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
	}
}

-fn auth_is_valid(codes: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
+fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
	match protocols {
		Ok(ref protocols) if protocols.len() == 1 => {
			protocols.iter().any(|protocol| {
@@ -69,8 +69,15 @@ fn auth_is_valid(codes: &Path, protocols: ws::Result<Vec<&str>>) -> bool {

				if let (Some(auth), Some(time)) = (auth, time) {
					// Check if the code is valid
-					AuthCodes::from_file(codes)
-						.map(|codes| codes.is_valid(&auth, time))
+					AuthCodes::from_file(codes_path)
+						.map(|mut codes| {
+							let res = codes.is_valid(&auth, time);
+							// make sure to save back authcodes - it might have been modified
+							if let Err(_) = codes.to_file(codes_path) {
+								warn!(target: "signer", "Couldn't save authorization codes to file.");
+							}
+							res
+						})
						.unwrap_or(false)
				} else {
					false
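Note (not part of the diff): because `is_valid` can now mutate the code store (it consumes the one-shot initial token), `auth_is_valid` has to write the store back after checking, and a failed save is only logged so it cannot turn a valid token into a rejection. The same control flow, restated with made-up `Store` and file types:

use std::path::Path;

// Everything here is illustrative; only the load -> check -> save-back shape
// mirrors auth_is_valid above.
struct Store {
	tokens: Vec<String>,
}

impl Store {
	fn load(_path: &Path) -> Result<Store, ()> {
		Ok(Store { tokens: vec!["abc".to_owned()] }) // pretend this was read from disk
	}

	fn save(&self, _path: &Path) -> Result<(), ()> {
		Ok(()) // pretend the (possibly modified) store was written back
	}

	fn is_valid(&mut self, token: &str) -> bool {
		let ok = self.tokens.iter().any(|t| t == token);
		if ok {
			// made-up mutation, standing in for e.g. consuming the initial token
			self.tokens.retain(|t| t != token);
		}
		ok
	}
}

fn check(path: &Path, token: &str) -> bool {
	Store::load(path)
		.map(|mut store| {
			let res = store.is_valid(token);
			if store.save(path).is_err() {
				eprintln!("couldn't save token store"); // log, but don't change the result
			}
			res
		})
		.unwrap_or(false)
}

fn main() {
	assert!(check(Path::new("/tmp/tokens"), "abc"));
	assert!(!check(Path::new("/tmp/tokens"), "xyz"));
}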
@@ -233,7 +233,7 @@ impl BlockCollection {
	fn insert_body(&mut self, b: Bytes) -> Result<(), NetworkError> {
		let body = UntrustedRlp::new(&b);
		let tx = try!(body.at(0));
-		let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here
+		let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here
		let uncles = try!(body.at(1)).as_raw().sha3();
		let header_id = HeaderId {
			transactions_root: tx_root,
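Note (not part of the diff): the last hunk drops a `.collect()` at the `ordered_trie_root` call site, i.e. the helper now takes the iterator of encoded transactions directly rather than requiring an intermediate Vec. A generic sketch of that API shape (the function here is made up; only the call-site difference is the point):

// Illustrative sketch of accepting any iterator instead of a collected Vec.
fn trie_root_of<I>(items: I) -> usize
where
	I: IntoIterator<Item = Vec<u8>>,
{
	// placeholder "root": just sums the lengths so the example is runnable
	items.into_iter().map(|item| item.len()).sum()
}

fn main() {
	let raw_items = vec![vec![1u8, 2, 3], vec![4u8, 5]];
	// before: trie_root_of(raw_items.iter().map(|r| r.to_vec()).collect::<Vec<_>>())
	// after:  pass the iterator straight through, no intermediate Vec
	let root = trie_root_of(raw_items.iter().map(|r| r.to_vec()));
	assert_eq!(root, 5);
}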
Some files were not shown because too many files have changed in this diff.