# Compare commits

28 commits

| SHA1 |
|---|
| e298ab30e4 |
| 63d3a4dd59 |
| 0e55b6c6c9 |
| 4d115987cb |
| 2d623f14db |
| 1d1a3b9d02 |
| 1d69b0e124 |
| 3c59475be6 |
| facab31551 |
| c9cfcd2728 |
| 557dbb73b8 |
| 6f772a28e2 |
| 16f3119547 |
| c3741640f7 |
| decca7f698 |
| fc53726c7f |
| e2b81bc08d |
| 4d2d1eac1f |
| 5572e3dd1d |
| 0db1c0d336 |
| 905f04dc9d |
| f560cf360c |
| f5d48cbf2a |
| d59e2ecbc6 |
| a97ca5aaf4 |
| df61b1b328 |
| 52a69d19e6 |
| 33084aaa07 |
**.gitlab-ci.yml**

```diff
@@ -1,7 +1,6 @@
 stages:
   - build
   - test
   - deploy
 variables:
   GIT_DEPTH: "3"
   SIMPLECOV: "true"
@@ -18,16 +17,39 @@ linux-stable:
     - tags
     - stable
   script:
     - export
     - cargo build --release --verbose
     - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity
   tags:
     - rust
     - rust-stable
   artifacts:
     paths:
     - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "stable-x86_64-unknown-linux-gnu_parity"
+linux-stable-14.04:
+  stage: build
+  image: ethcore/rust-14.04:latest
+  only:
+    - master
+    - beta
+    - tags
+    - stable
+  script:
+    - cargo build --release --verbose
+    - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity
+  tags:
+    - rust
+    - rust-14.04
+  artifacts:
+    paths:
+    - target/release/parity
+    name: "stable-x86_64-unknown-ubuntu_14_04-gnu_parity"
 linux-beta:
   stage: build
   image: ethcore/rust:beta
@@ -37,17 +59,16 @@ linux-beta:
     - tags
     - stable
   script:
     - export
     - cargo build --release --verbose
     - strip target/release/parity
-    - cp target/release/parity parity
   tags:
     - rust
     - rust-beta
   artifacts:
     paths:
     - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "beta-x86_64-unknown-linux-gnu_parity"
   allow_failure: true
 linux-nightly:
   stage: build
   image: ethcore/rust:nightly
@@ -65,7 +86,7 @@ linux-nightly:
   artifacts:
     paths:
     - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "nigthly-x86_64-unknown-linux-gnu_parity"
   allow_failure: true
 linux-centos:
   stage: build
@@ -80,13 +101,16 @@ linux-centos:
     - export CC="gcc"
     - cargo build --release --verbose
     - strip target/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity
   tags:
     - rust
     - rust-centos
   artifacts:
     paths:
     - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "x86_64-unknown-centos-gnu_parity"
 linux-armv7:
   stage: build
   image: ethcore/rust-armv7:latest
@@ -96,7 +120,6 @@ linux-armv7:
     - tags
     - stable
   script:
     - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
@@ -104,13 +127,16 @@ linux-armv7:
     - cat .cargo/config
     - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
     - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
     - target/armv7-unknown-linux-gnueabihf/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "armv7_unknown_linux_gnueabihf_parity"
   allow_failure: true
 linux-arm:
   stage: build
@@ -121,7 +147,6 @@ linux-arm:
     - tags
     - stable
   script:
     - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
@@ -129,13 +154,16 @@ linux-arm:
     - cat .cargo/config
     - cargo build --target arm-unknown-linux-gnueabihf --release --verbose
     - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
     - target/arm-unknown-linux-gnueabihf/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "arm-unknown-linux-gnueabihf_parity"
   allow_failure: true
 linux-armv6:
   stage: build
@@ -146,7 +174,6 @@ linux-armv6:
     - tags
     - stable
   script:
     - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
@@ -154,13 +181,16 @@ linux-armv6:
     - cat .cargo/config
     - cargo build --target arm-unknown-linux-gnueabi --release --verbose
     - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
     - target/arm-unknown-linux-gnueabi/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "arm-unknown-linux-gnueabi_parity"
   allow_failure: true
 linux-aarch64:
   stage: build
@@ -171,7 +201,6 @@ linux-aarch64:
     - tags
     - stable
   script:
     - export
     - rm -rf .cargo
     - mkdir -p .cargo
     - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
@@ -179,13 +208,16 @@ linux-aarch64:
     - cat .cargo/config
     - cargo build --target aarch64-unknown-linux-gnu --release --verbose
     - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity
   tags:
     - rust
     - rust-arm
   artifacts:
     paths:
     - target/aarch64-unknown-linux-gnu/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "aarch64-unknown-linux-gnu_parity"
   allow_failure: true
 darwin:
   stage: build
@@ -196,12 +228,15 @@ darwin:
     - stable
   script:
     - cargo build --release --verbose
+    - aws configure set aws_access_key_id $s3_key
+    - aws configure set aws_secret_access_key $s3_secret
+    - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity
   tags:
     - osx
   artifacts:
     paths:
     - target/release/parity
-    name: "${CI_BUILD_NAME}_parity"
+    name: "x86_64-apple-darwin_parity"
 windows:
   stage: build
   only:
@@ -215,29 +250,25 @@ windows:
     - set RUST_BACKTRACE=1
     - rustup default stable-x86_64-pc-windows-msvc
     - cargo build --release --verbose
+    - aws configure set aws_access_key_id %s3_key%
+    - aws configure set aws_secret_access_key %s3_secret%
+    - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe
+    - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb
   tags:
     - rust-windows
   artifacts:
     paths:
     - target/release/parity.exe
     - target/release/parity.pdb
-    name: "${CI_BUILD_NAME}_parity"
+    name: "x86_64-pc-windows-msvc_parity"
 test-linux:
   stage: test
   before_script:
     - git submodule update --init --recursive
   script:
     - export RUST_BACKTRACE=1
     - ./test.sh --verbose
   tags:
     - rust-test
   dependencies:
     - linux-stable
 deploy-binaries:
   stage: deploy
   only:
     - master
     - beta
     - tags
     - stable
   script:
     - ll
```
**Cargo.lock** (generated; 65 changes)

```diff
@@ -1,6 +1,6 @@
 [root]
 name = "parity"
-version = "1.3.1"
+version = "1.3.2"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "clippy 0.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -20,7 +20,7 @@ dependencies = [
  "ethcore-logger 1.3.0",
  "ethcore-rpc 1.3.0",
  "ethcore-signer 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "ethsync 1.3.0",
  "fdlimit 0.1.0",
  "hyper 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -112,6 +112,20 @@ name = "bloomchain"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "bloomfilter"
+version = "0.0.10"
+source = "git+https://github.com/ethcore/rust-bloom-filter#e66a3f20443bc78810877390ad4da00ebdf74d78"
+dependencies = [
+ "bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "byteorder"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "bytes"
 version = "0.3.0"
@@ -244,6 +258,8 @@ version = "1.3.0"
 dependencies = [
  "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bloomfilter 0.0.10 (git+https://github.com/ethcore/rust-bloom-filter)",
+ "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "clippy 0.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -253,13 +269,15 @@ dependencies = [
  "ethcore-ipc 1.3.0",
  "ethcore-ipc-codegen 1.3.0",
  "ethcore-ipc-nano 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "ethjson 0.1.0",
  "ethstore 0.1.0",
+ "evmjit 1.3.0",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -275,7 +293,7 @@ version = "1.3.0"
 dependencies = [
  "clippy 0.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-rpc 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
  "jsonrpc-core 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
@@ -317,7 +335,7 @@ name = "ethcore-ipc"
 version = "1.3.0"
 dependencies = [
  "ethcore-devtools 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
  "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -362,7 +380,7 @@ dependencies = [
  "ethcore-ipc 1.3.0",
  "ethcore-ipc-codegen 1.3.0",
  "ethcore-ipc-nano 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
  "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -374,7 +392,7 @@ name = "ethcore-logger"
 version = "1.3.0"
 dependencies = [
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -389,7 +407,7 @@ dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-devtools 1.3.0",
  "ethcore-io 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "igd 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -413,7 +431,7 @@ dependencies = [
  "ethcore-devtools 1.3.0",
  "ethcore-io 1.3.0",
  "ethcore-ipc 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "ethjson 0.1.0",
  "ethsync 1.3.0",
  "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)",
@@ -436,7 +454,7 @@ dependencies = [
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-io 1.3.0",
  "ethcore-rpc 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "jsonrpc-core 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-dapps-signer 1.4.0 (git+https://github.com/ethcore/parity-ui.git)",
@@ -447,7 +465,7 @@ dependencies = [
 
 [[package]]
 name = "ethcore-util"
-version = "1.3.1"
+version = "1.3.2"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -480,7 +498,7 @@ dependencies = [
 name = "ethjson"
 version = "0.1.0"
 dependencies = [
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -528,7 +546,7 @@ dependencies = [
  "ethcore-ipc-codegen 1.3.0",
  "ethcore-ipc-nano 1.3.0",
  "ethcore-network 1.3.0",
- "ethcore-util 1.3.1",
+ "ethcore-util 1.3.2",
  "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -537,6 +555,13 @@ dependencies = [
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "evmjit"
+version = "1.3.0"
+dependencies = [
+ "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "fdlimit"
 version = "0.1.0"
@@ -738,11 +763,24 @@ name = "libc"
 version = "0.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "linked-hash-map"
+version = "0.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "log"
 version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "lru-cache"
+version = "0.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "linked-hash-map 0.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "matches"
 version = "0.1.2"
@@ -1628,6 +1666,7 @@ dependencies = [
 "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f"
 "checksum libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "97def9dc7ce1d8e153e693e3a33020bc69972181adb2f871e87e888876feae49"
 "checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
+"checksum lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "42d50dcb5d9f145df83b1043207e1ac0c37c9c779c4e128ca4655abc3f3cbf8c"
 "checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e"
 "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
 "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2"
```
**Cargo.toml**

```diff
@@ -1,7 +1,7 @@
 [package]
 description = "Ethcore client."
 name = "parity"
-version = "1.3.1"
+version = "1.3.2"
 license = "GPL-3.0"
 authors = ["Ethcore <admin@ethcore.io>"]
 build = "build.rs"
@@ -59,6 +59,7 @@ ui = ["dapps", "ethcore-signer/ui"]
 use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"]
 dapps = ["ethcore-dapps"]
 ipc = ["ethcore/ipc"]
 jit = ["ethcore/jit"]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"]
 json-tests = ["ethcore/json-tests"]
```
**Dockerfile (aarch64 build image)**

```diff
@@ -23,15 +23,9 @@ RUN rustup target add aarch64-unknown-linux-gnu
 # show backtraces
 ENV RUST_BACKTRACE 1
 
-# set compilers
-ENV CXX aarch64-linux-gnu-g++
-ENV CC aarch64-linux-gnu-gcc
-
 # show tools
 RUN rustc -vV && \
-    cargo -V && \
-    gcc -v &&\
-    g++ -v
+    cargo -V
 
 # build parity
 RUN git clone https://github.com/ethcore/parity && \
```
**Dockerfile (armv7 build image)**

```diff
@@ -23,15 +23,9 @@ RUN rustup target add armv7-unknown-linux-gnueabihf
 # show backtraces
 ENV RUST_BACKTRACE 1
 
-# set compilers
-ENV CXX arm-linux-gnueabihf-g++
-ENV CC arm-linux-gnueabihf-gcc
-
 # show tools
 RUN rustc -vV && \
-    cargo -V && \
-    gcc -v &&\
-    g++ -v
+    cargo -V
 
 # build parity
 RUN git clone https://github.com/ethcore/parity && \
```
@@ -35,6 +35,9 @@ ethcore-ipc = { path = "../ipc/rpc" }
|
||||
ethstore = { path = "../ethstore" }
|
||||
ethcore-ipc-nano = { path = "../ipc/nano" }
|
||||
rand = "0.3"
|
||||
lru-cache = "0.0.7"
|
||||
bloomfilter = { git = "https://github.com/ethcore/rust-bloom-filter" }
|
||||
byteorder = "0.5"
|
||||
|
||||
[dependencies.hyper]
|
||||
git = "https://github.com/ethcore/hyper"
|
||||
|
||||
**account.rs**

```diff
@@ -20,11 +20,13 @@ use std::collections::hash_map::Entry;
 use util::*;
 use pod_account::*;
 use account_db::*;
+use lru_cache::LruCache;
 
-use std::cell::{Ref, RefCell, Cell};
+use std::cell::{RefCell, Cell};
 
+const STORAGE_CACHE_ITEMS: usize = 4096;
 
 /// Single account in the system.
 #[derive(Clone)]
 pub struct Account {
 	// Balance of the account.
 	balance: U256,
@@ -32,10 +34,16 @@ pub struct Account {
 	nonce: U256,
 	// Trie-backed storage.
 	storage_root: H256,
-	// Overlay on trie-backed storage - tuple is (<clean>, <value>).
-	storage_overlay: RefCell<HashMap<H256, (Filth, H256)>>,
+	// LRU Cache of the trie-backed storage.
+	// This is limited to `STORAGE_CACHE_ITEMS` recent queries
+	storage_cache: RefCell<LruCache<H256, H256>>,
+	// Modified storage. Accumulates changes to storage made in `set_storage`
+	// Takes precedence over `storage_cache`.
+	storage_changes: HashMap<H256, H256>,
 	// Code hash of the account. If None, means that it's a contract whose code has not yet been set.
 	code_hash: Option<H256>,
+	// Size of the accoun code.
+	code_size: Option<u64>,
 	// Code cache of the account.
 	code_cache: Bytes,
 	// Account is new or has been modified
@@ -52,23 +60,31 @@ impl Account {
 			balance: balance,
 			nonce: nonce,
 			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: storage,
 			code_hash: Some(code.sha3()),
+			code_size: Some(code.len() as u64),
 			code_cache: code,
 			filth: Filth::Dirty,
 			address_hash: Cell::new(None),
 		}
 	}
 
+	fn empty_storage_cache() -> RefCell<LruCache<H256, H256>> {
+		RefCell::new(LruCache::new(STORAGE_CACHE_ITEMS))
+	}
+
 	/// General constructor.
 	pub fn from_pod(pod: PodAccount) -> Account {
 		Account {
 			balance: pod.balance,
 			nonce: pod.nonce,
 			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(pod.storage.into_iter().map(|(k, v)| (k, (Filth::Dirty, v))).collect()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: pod.storage.into_iter().collect(),
 			code_hash: pod.code.as_ref().map(|c| c.sha3()),
-			code_cache: pod.code.as_ref().map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c.clone()),
+			code_size: Some(pod.code.as_ref().map_or(0, |c| c.len() as u64)),
+			code_cache: pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c),
 			filth: Filth::Dirty,
 			address_hash: Cell::new(None),
 		}
@@ -80,9 +96,11 @@ impl Account {
 			balance: balance,
 			nonce: nonce,
 			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(HashMap::new()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
 			code_hash: Some(SHA3_EMPTY),
 			code_cache: vec![],
+			code_size: Some(0),
 			filth: Filth::Dirty,
 			address_hash: Cell::new(None),
 		}
@@ -95,9 +113,11 @@ impl Account {
 			nonce: r.val_at(0),
 			balance: r.val_at(1),
 			storage_root: r.val_at(2),
-			storage_overlay: RefCell::new(HashMap::new()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
 			code_hash: Some(r.val_at(3)),
 			code_cache: vec![],
+			code_size: None,
 			filth: Filth::Clean,
 			address_hash: Cell::new(None),
 		}
@@ -110,9 +130,11 @@ impl Account {
 			balance: balance,
 			nonce: nonce,
 			storage_root: SHA3_NULL_RLP,
-			storage_overlay: RefCell::new(HashMap::new()),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
 			code_hash: None,
 			code_cache: vec![],
+			code_size: None,
 			filth: Filth::Dirty,
 			address_hash: Cell::new(None),
 		}
@@ -123,44 +145,62 @@ impl Account {
 	pub fn init_code(&mut self, code: Bytes) {
 		assert!(self.code_hash.is_none());
 		self.code_cache = code;
+		self.code_size = Some(self.code_cache.len() as u64);
 		self.filth = Filth::Dirty;
 	}
 
 	/// Reset this account's code to the given code.
 	pub fn reset_code(&mut self, code: Bytes) {
 		self.code_hash = None;
+		self.code_size = Some(0);
 		self.init_code(code);
 	}
 
 	/// Set (and cache) the contents of the trie's storage at `key` to `value`.
 	pub fn set_storage(&mut self, key: H256, value: H256) {
-		match self.storage_overlay.borrow_mut().entry(key) {
-			Entry::Occupied(ref mut entry) if entry.get().1 != value => {
-				entry.insert((Filth::Dirty, value));
+		match self.storage_changes.entry(key) {
+			Entry::Occupied(ref mut entry) if entry.get() != &value => {
+				entry.insert(value);
 				self.filth = Filth::Dirty;
 			},
 			Entry::Vacant(entry) => {
-				entry.insert((Filth::Dirty, value));
+				entry.insert(value);
 				self.filth = Filth::Dirty;
 			},
-			_ => (),
+			_ => {},
 		}
 	}
 
 	/// Get (and cache) the contents of the trie's storage at `key`.
 	/// Takes modifed storage into account.
 	pub fn storage_at(&self, db: &AccountDB, key: &H256) -> H256 {
-		self.storage_overlay.borrow_mut().entry(key.clone()).or_insert_with(||{
-			let db = SecTrieDB::new(db, &self.storage_root)
-				.expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
-				SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
-				using it will not fail.");
+		if let Some(value) = self.cached_storage_at(key) {
+			return value;
+		}
+		let db = SecTrieDB::new(db, &self.storage_root)
+			.expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
+			SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
+			using it will not fail.");
 
-			let item: U256 = match db.get(key){
-				Ok(x) => x.map_or_else(U256::zero, decode),
-				Err(e) => panic!("Encountered potential DB corruption: {}", e),
-			};
-			(Filth::Clean, item.into())
-		}).1.clone()
+		let item: U256 = match db.get(key){
+			Ok(x) => x.map_or_else(U256::zero, decode),
+			Err(e) => panic!("Encountered potential DB corruption: {}", e),
+		};
+		let value: H256 = item.into();
+		self.storage_cache.borrow_mut().insert(key.clone(), value.clone());
+		value
+	}
+
+	/// Get cached storage value if any. Returns `None` if the
+	/// key is not in the cache.
+	pub fn cached_storage_at(&self, key: &H256) -> Option<H256> {
+		if let Some(value) = self.storage_changes.get(key) {
+			return Some(value.clone())
+		}
+		if let Some(value) = self.storage_cache.borrow_mut().get_mut(key) {
+			return Some(value.clone())
+		}
+		None
 	}
 
 	/// return the balance associated with this account.
@@ -196,6 +236,12 @@ impl Account {
 		}
 	}
 
+	/// returns the account's code size. If `None` then the code cache or code size cache isn't available -
+	/// get someone who knows to call `note_code`.
+	pub fn code_size(&self) -> Option<u64> {
+		self.code_size.clone()
+	}
+
 	#[cfg(test)]
 	/// Provide a byte array which hashes to the `code_hash`. returns the hash as a result.
 	pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> {
@@ -203,6 +249,7 @@ impl Account {
 		match self.code_hash {
 			Some(ref i) if h == *i => {
 				self.code_cache = code;
+				self.code_size = Some(self.code_cache.len() as u64);
 				Ok(())
 			},
 			_ => Err(h)
@@ -216,11 +263,12 @@ impl Account {
 
 	/// Is this a new or modified account?
 	pub fn is_dirty(&self) -> bool {
-		self.filth == Filth::Dirty
+		self.filth == Filth::Dirty || !self.storage_is_clean()
 	}
 
 	/// Mark account as clean.
 	pub fn set_clean(&mut self) {
+		assert!(self.storage_is_clean());
 		self.filth = Filth::Clean
 	}
 
@@ -231,7 +279,31 @@ impl Account {
 		self.is_cached() ||
 		match self.code_hash {
 			Some(ref h) => match db.get(h) {
-				Some(x) => { self.code_cache = x.to_vec(); true },
+				Some(x) => {
+					self.code_cache = x.to_vec();
+					self.code_size = Some(x.len() as u64);
+					true
+				},
 				_ => {
 					warn!("Failed reverse get of {}", h);
 					false
 				},
 			},
 			_ => false,
 		}
 	}
 
+	/// Provide a database to get `code_size`. Should not be called if it is a contract without code.
+	pub fn cache_code_size(&mut self, db: &AccountDB) -> bool {
+		// TODO: fill out self.code_cache;
+		trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty());
+		self.code_size.is_some() ||
+		match self.code_hash {
+			Some(ref h) if h != &SHA3_EMPTY => match db.get(h) {
+				Some(x) => {
+					self.code_size = Some(x.len() as u64);
+					true
+				},
+				_ => {
+					warn!("Failed reverse get of {}", h);
+					false
@@ -241,16 +313,15 @@ impl Account {
 		}
 	}
 
-	#[cfg(test)]
 	/// Determine whether there are any un-`commit()`-ed storage-setting operations.
-	pub fn storage_is_clean(&self) -> bool { self.storage_overlay.borrow().iter().find(|&(_, &(f, _))| f == Filth::Dirty).is_none() }
+	pub fn storage_is_clean(&self) -> bool { self.storage_changes.is_empty() }
 
 	#[cfg(test)]
 	/// return the storage root associated with this account or None if it has been altered via the overlay.
 	pub fn storage_root(&self) -> Option<&H256> { if self.storage_is_clean() {Some(&self.storage_root)} else {None} }
 
 	/// return the storage overlay.
-	pub fn storage_overlay(&self) -> Ref<HashMap<H256, (Filth, H256)>> { self.storage_overlay.borrow() }
+	pub fn storage_changes(&self) -> &HashMap<H256, H256> { &self.storage_changes }
 
 	/// Increment the nonce of the account by one.
 	pub fn inc_nonce(&mut self) {
@@ -276,26 +347,24 @@ impl Account {
 		}
 	}
 
-	/// Commit the `storage_overlay` to the backing DB and update `storage_root`.
+	/// Commit the `storage_changes` to the backing DB and update `storage_root`.
 	pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut AccountDBMut) {
 		let mut t = trie_factory.from_existing(db, &mut self.storage_root)
 			.expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \
 			SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
 			using it will not fail.");
-		for (k, &mut (ref mut f, ref mut v)) in self.storage_overlay.borrow_mut().iter_mut() {
-			if f == &Filth::Dirty {
-				// cast key and value to trait type,
-				// so we can call overloaded `to_bytes` method
-				let res = match v.is_zero() {
-					true => t.remove(k),
-					false => t.insert(k, &encode(&U256::from(v.as_slice()))),
-				};
+		for (k, v) in self.storage_changes.drain() {
+			// cast key and value to trait type,
+			// so we can call overloaded `to_bytes` method
+			let res = match v.is_zero() {
+				true => t.remove(k.as_slice()),
+				false => t.insert(k.as_slice(), &encode(&U256::from(v.as_slice()))),
+			};
 
-				if let Err(e) = res {
-					warn!("Encountered potential DB corruption: {}", e);
-				}
-				*f = Filth::Clean;
+			if let Err(e) = res {
+				warn!("Encountered potential DB corruption: {}", e);
 			}
+			self.storage_cache.borrow_mut().insert(k, v);
 		}
 	}
 
@@ -303,9 +372,13 @@ impl Account {
 	pub fn commit_code(&mut self, db: &mut AccountDBMut) {
 		trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_hash.is_none(), self.code_cache.is_empty());
 		match (self.code_hash.is_none(), self.code_cache.is_empty()) {
-			(true, true) => self.code_hash = Some(SHA3_EMPTY),
+			(true, true) => {
+				self.code_hash = Some(SHA3_EMPTY);
+				self.code_size = Some(0);
+			},
 			(true, false) => {
 				self.code_hash = Some(db.insert(&self.code_cache));
+				self.code_size = Some(self.code_cache.len() as u64);
 			},
 			(false, _) => {},
 		}
@@ -317,9 +390,57 @@ impl Account {
 		stream.append(&self.nonce);
 		stream.append(&self.balance);
 		stream.append(&self.storage_root);
-		stream.append(self.code_hash.as_ref().expect("Cannot form RLP of contract account without code."));
+		stream.append(self.code_hash.as_ref().unwrap_or(&SHA3_EMPTY));
 		stream.out()
 	}
+
+	/// Clone basic account data
+	pub fn clone_basic(&self) -> Account {
+		Account {
+			balance: self.balance.clone(),
+			nonce: self.nonce.clone(),
+			storage_root: self.storage_root.clone(),
+			storage_cache: Self::empty_storage_cache(),
+			storage_changes: HashMap::new(),
+			code_hash: self.code_hash.clone(),
+			code_size: self.code_size.clone(),
+			code_cache: Bytes::new(),
+			filth: self.filth,
+			address_hash: self.address_hash.clone(),
+		}
+	}
+
+	/// Clone account data and dirty storage keys
+	pub fn clone_dirty(&self) -> Account {
+		let mut account = self.clone_basic();
+		account.storage_changes = self.storage_changes.clone();
+		account.code_cache = self.code_cache.clone();
+		account
+	}
+
+	/// Clone account data, dirty storage keys and cached storage keys.
+	pub fn clone_all(&self) -> Account {
+		let mut account = self.clone_dirty();
+		account.storage_cache = self.storage_cache.clone();
+		account
+	}
+
+	/// Replace self with the data from other account merging storage cache
+	pub fn merge_with(&mut self, other: Account) {
+		assert!(self.storage_is_clean());
+		assert!(other.storage_is_clean());
+		self.balance = other.balance;
+		self.nonce = other.nonce;
+		self.storage_root = other.storage_root;
+		self.code_hash = other.code_hash;
+		self.code_cache = other.code_cache;
+		self.code_size = other.code_size;
+		self.address_hash = other.address_hash;
+		let mut cache = self.storage_cache.borrow_mut();
+		for (k, v) in other.storage_cache.into_inner().into_iter() {
+			cache.insert(k.clone() , v.clone()); //TODO: cloning should not be required here
+		}
+	}
 }
 
 impl fmt::Debug for Account {
@@ -415,6 +536,7 @@ mod tests {
 		let mut db = AccountDBMut::new(&mut db, &Address::new());
 		a.init_code(vec![0x55, 0x44, 0xffu8]);
 		assert_eq!(a.code_hash(), SHA3_EMPTY);
+		assert_eq!(a.code_size(), Some(3));
 		a.commit_code(&mut db);
 		assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb");
 	}
```
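The account changes above split the old `storage_overlay` into two structures: `storage_changes` for pending writes and a bounded `storage_cache` for values already read from the trie, with `cached_storage_at` consulted before any trie lookup. Below is a self-contained sketch of that read order using plain `std` maps in place of parity's `H256` keys and `LruCache`; the `Store` type and `trie_get` function are illustrative stand-ins, not code from the diff.

```rust
use std::collections::HashMap;

// Illustrative stand-ins for parity's H256 keys and values.
type Key = u64;
type Value = u64;

struct Store {
    // Uncommitted writes made through `set_storage`; checked first.
    changes: HashMap<Key, Value>,
    // Bounded cache of values already read from the trie
    // (an `LruCache` capped at STORAGE_CACHE_ITEMS in the real code).
    cache: HashMap<Key, Value>,
}

impl Store {
    // Mirrors the lookup order of `Account::storage_at`:
    // pending changes, then cache, then the trie itself.
    fn get(&mut self, key: Key) -> Value {
        if let Some(v) = self.changes.get(&key) {
            return *v;
        }
        if let Some(v) = self.cache.get(&key) {
            return *v;
        }
        let v = trie_get(key);     // slow path: trie-backed storage
        self.cache.insert(key, v); // remember it for the next read
        v
    }
}

// Placeholder for the trie lookup; absent keys read as zero.
fn trie_get(_key: Key) -> Value {
    0
}

fn main() {
    let mut s = Store { changes: HashMap::new(), cache: HashMap::new() };
    s.changes.insert(1, 42);
    assert_eq!(s.get(1), 42); // served from pending changes
    assert_eq!(s.get(2), 0);  // trie miss, value is now cached
    assert!(s.cache.contains_key(&2));
}
```

The important property is that a pending write always shadows both the cache and the trie, which is what lets `commit_storage` later drain `storage_changes` into the trie and promote each entry into the cache.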
**block.rs**

```diff
@@ -19,6 +19,7 @@
 use common::*;
 use engines::Engine;
 use state::*;
+use state_db::StateDB;
 use verification::PreverifiedBlock;
 use trace::FlatTrace;
 use evm::Factory as EvmFactory;
@@ -178,7 +179,7 @@ pub trait IsBlock {
 /// Trait for a object that has a state database.
 pub trait Drain {
 	/// Drop this object and return the underlieing database.
-	fn drain(self) -> Box<JournalDB>;
+	fn drain(self) -> StateDB;
 }
 
 impl IsBlock for ExecutedBlock {
@@ -233,7 +234,7 @@ impl<'x> OpenBlock<'x> {
 		vm_factory: &'x EvmFactory,
 		trie_factory: TrieFactory,
 		tracing: bool,
-		db: Box<JournalDB>,
+		db: StateDB,
 		parent: &Header,
 		last_hashes: Arc<LastHashes>,
 		author: Address,
@@ -465,7 +466,9 @@ impl LockedBlock {
 
 impl Drain for LockedBlock {
 	/// Drop this object and return the underlieing database.
-	fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
+	fn drain(self) -> StateDB {
+		self.block.state.drop().1
+	}
 }
 
 impl SealedBlock {
@@ -481,7 +484,9 @@ impl SealedBlock {
 
 impl Drain for SealedBlock {
 	/// Drop this object and return the underlieing database.
-	fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
+	fn drain(self) -> StateDB {
+		self.block.state.drop().1
+	}
 }
 
 impl IsBlock for SealedBlock {
@@ -496,7 +501,7 @@ pub fn enact(
 	uncles: &[Header],
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	vm_factory: &EvmFactory,
@@ -529,7 +534,7 @@ pub fn enact_bytes(
 	block_bytes: &[u8],
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	vm_factory: &EvmFactory,
@@ -546,7 +551,7 @@ pub fn enact_verified(
 	block: &PreverifiedBlock,
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	vm_factory: &EvmFactory,
@@ -562,7 +567,7 @@ pub fn enact_and_seal(
 	block_bytes: &[u8],
 	engine: &Engine,
 	tracing: bool,
-	db: Box<JournalDB>,
+	db: StateDB,
 	parent: &Header,
 	last_hashes: Arc<LastHashes>,
 	vm_factory: &EvmFactory,
@@ -583,9 +588,9 @@ mod tests {
 	use spec::*;
 	let spec = Spec::new_test();
 	let genesis_header = spec.genesis_header();
-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
 	let mut db = db_result.take();
-	spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	spec.ensure_db_good(&mut db).unwrap();
 	let last_hashes = Arc::new(vec![genesis_header.hash()]);
 	let vm_factory = Default::default();
 	let b = OpenBlock::new(&*spec.engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
@@ -600,9 +605,9 @@ mod tests {
 	let engine = &*spec.engine;
 	let genesis_header = spec.genesis_header();
 
-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
 	let mut db = db_result.take();
-	spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	spec.ensure_db_good(&mut db).unwrap();
 	let vm_factory = Default::default();
 	let last_hashes = Arc::new(vec![genesis_header.hash()]);
 	let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
@@ -610,16 +615,16 @@ mod tests {
 	let orig_bytes = b.rlp_bytes();
 	let orig_db = b.drain();
 
-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
 	let mut db = db_result.take();
-	spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	spec.ensure_db_good(&mut db).unwrap();
 	let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, &Default::default(), Default::default()).unwrap();
 
 	assert_eq!(e.rlp_bytes(), orig_bytes);
 
 	let db = e.drain();
-	assert_eq!(orig_db.keys(), db.keys());
-	assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None);
+	assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys());
+	assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None);
 }
 
 #[test]
@@ -629,9 +634,9 @@ mod tests {
 	let engine = &*spec.engine;
 	let genesis_header = spec.genesis_header();
 
-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
 	let mut db = db_result.take();
-	spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	spec.ensure_db_good(&mut db).unwrap();
 	let vm_factory = Default::default();
 	let last_hashes = Arc::new(vec![genesis_header.hash()]);
 	let mut open_block = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
@@ -646,9 +651,9 @@ mod tests {
 	let orig_bytes = b.rlp_bytes();
 	let orig_db = b.drain();
 
-	let mut db_result = get_temp_journal_db();
+	let mut db_result = get_temp_state_db();
 	let mut db = db_result.take();
-	spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+	spec.ensure_db_good(&mut db).unwrap();
 	let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, &Default::default(), Default::default()).unwrap();
 
 	let bytes = e.rlp_bytes();
@@ -657,7 +662,7 @@ mod tests {
 	assert_eq!(uncles[1].extra_data, b"uncle2");
 
 	let db = e.drain();
-	assert_eq!(orig_db.keys(), db.keys());
-	assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None);
+	assert_eq!(orig_db.journal_db().keys(), db.journal_db().keys());
+	assert!(orig_db.journal_db().keys().iter().filter(|k| orig_db.journal_db().get(k.0) != db.journal_db().get(k.0)).next() == None);
 }
 }
```
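One side effect of `Drain::drain` now returning a concrete `StateDB` instead of `Box<JournalDB>` is visible in the updated tests: callers reach the underlying journal database through `journal_db()` rather than using the boxed trait object directly. A compilable toy version of the consuming-drain shape follows; all type names here are stand-ins, not parity's definitions.

```rust
// Stand-in for parity's state database wrapper.
struct StateDB;

// Same shape as the updated trait: `drain` takes `self` by value,
// so the block is consumed and the database moves out of it.
trait Drain {
    fn drain(self) -> StateDB;
}

struct LockedBlock {
    state: StateDB,
}

impl Drain for LockedBlock {
    fn drain(self) -> StateDB {
        self.state
    }
}

fn main() {
    let block = LockedBlock { state: StateDB };
    let _db: StateDB = block.drain(); // `block` is moved and gone
}
```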
**blockchain.rs**

```diff
@@ -165,7 +165,7 @@ pub struct BlockChain {
 
 	pending_best_block: RwLock<Option<BestBlock>>,
 	pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
-	pending_transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
+	pending_transaction_addresses: RwLock<HashMap<H256, Option<TransactionAddress>>>,
 }
 
 impl BlockProvider for BlockChain {
@@ -584,8 +584,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info,
 			block: bytes
 		}, is_best);
@@ -618,8 +618,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: update,
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info,
 			block: bytes,
 		}, is_best);
@@ -687,8 +687,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info.clone(),
 			block: bytes,
 		}, true);
@@ -744,8 +744,9 @@ impl BlockChain {
 		let mut write_details = self.block_details.write();
 		batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
 
+		let mut cache_man = self.cache_man.write();
 		for hash in block_hashes.into_iter() {
-			self.note_used(CacheID::BlockDetails(hash));
+			cache_man.note_used(CacheID::BlockDetails(hash));
 		}
 	}
 
@@ -780,7 +781,7 @@ impl BlockChain {
 		let mut write_txs = self.pending_transaction_addresses.write();
 
 		batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
-		batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
+		batch.extend_with_option_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
 	}
 }
@@ -798,18 +799,26 @@ impl BlockChain {
 			*best_block = block;
 		}
 
+		let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new());
+		let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::<HashMap<_, _>, _>(|&(_, ref value)| value.is_none());
+
 		let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect();
-		let pending_txs_keys: Vec<_> = pending_write_txs.keys().cloned().collect();
+		let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect();
 
 		write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
-		write_txs.extend(mem::replace(&mut *pending_write_txs, HashMap::new()));
+		write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed"))));
 
-		for n in pending_hashes_keys.into_iter() {
-			self.note_used(CacheID::BlockHashes(n));
+		for hash in retracted_txs.keys() {
+			write_txs.remove(hash);
 		}
 
-		for hash in pending_txs_keys.into_iter() {
-			self.note_used(CacheID::TransactionAddresses(hash));
+		let mut cache_man = self.cache_man.write();
+		for n in pending_hashes_keys.into_iter() {
+			cache_man.note_used(CacheID::BlockHashes(n));
 		}
+
+		for hash in enacted_txs_keys {
+			cache_man.note_used(CacheID::TransactionAddresses(hash));
+		}
 	}
@@ -910,7 +919,7 @@ impl BlockChain {
 	}
 
 	/// This function returns modified transaction addresses.
-	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, TransactionAddress> {
+	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
 		let block = BlockView::new(block_bytes);
 		let transaction_hashes = block.transaction_hashes();
 
@@ -919,10 +928,10 @@ impl BlockChain {
 				transaction_hashes.into_iter()
 					.enumerate()
 					.map(|(i ,tx_hash)| {
-						(tx_hash, TransactionAddress {
+						(tx_hash, Some(TransactionAddress {
 							block_hash: info.hash.clone(),
 							index: i
-						})
+						}))
 					})
 					.collect()
 			},
@@ -933,23 +942,30 @@ impl BlockChain {
 				let hashes = BodyView::new(&bytes).transaction_hashes();
 				hashes.into_iter()
 					.enumerate()
-					.map(|(i, tx_hash)| (tx_hash, TransactionAddress {
+					.map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress {
 						block_hash: hash.clone(),
 						index: i,
-					}))
-					.collect::<HashMap<H256, TransactionAddress>>()
+					})))
+					.collect::<HashMap<H256, Option<TransactionAddress>>>()
 			});
 
 			let current_addresses = transaction_hashes.into_iter()
 				.enumerate()
 				.map(|(i ,tx_hash)| {
-					(tx_hash, TransactionAddress {
+					(tx_hash, Some(TransactionAddress {
 						block_hash: info.hash.clone(),
 						index: i
-					})
+					}))
 				});
 
-			addresses.chain(current_addresses).collect()
+			let retracted = data.retracted.iter().flat_map(|hash| {
+				let bytes = self.block_body(hash).expect("Retracted block must be in database.");
+				let hashes = BodyView::new(&bytes).transaction_hashes();
+				hashes.into_iter().map(|hash| (hash, None)).collect::<HashMap<H256, Option<TransactionAddress>>>()
+			});
+
+			// The order here is important! Don't remove transaction if it was part of enacted blocks as well.
+			retracted.chain(addresses).chain(current_addresses).collect()
 		},
 		BlockLocation::Branch => HashMap::new(),
 	}
@@ -1248,6 +1264,70 @@ mod tests {
 		// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
 	}
 
+	#[test]
+	fn test_fork_transaction_addresses() {
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		let genesis = canon_chain.generate(&mut finalizer).unwrap();
+		let mut fork_chain = canon_chain.fork(1);
+		let mut fork_finalizer = finalizer.fork();
+
+		let t1 = Transaction {
+			nonce: 0.into(),
+			gas_price: 0.into(),
+			gas: 100_000.into(),
+			action: Action::Create,
+			value: 100.into(),
+			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
+		}.sign(&"".sha3());
+
+		let b1a = canon_chain
+			.with_transaction(t1.clone())
+			.generate(&mut finalizer).unwrap();
+
+		// Empty block
+		let b1b = fork_chain
+			.generate(&mut fork_finalizer).unwrap();
+
+		let b2 = fork_chain
+			.generate(&mut fork_finalizer).unwrap();
+
+		let b1a_hash = BlockView::new(&b1a).header_view().sha3();
+		let b2_hash = BlockView::new(&b2).header_view().sha3();
+
+		let t1_hash = t1.hash();
+
+		let temp = RandomTempPath::new();
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+
+		let mut batch = db.transaction();
+		let _ = bc.insert_block(&mut batch, &b1a, vec![]);
+		bc.commit();
+		let _ = bc.insert_block(&mut batch, &b1b, vec![]);
+		bc.commit();
+		db.write(batch).unwrap();
+
+		assert_eq!(bc.best_block_hash(), b1a_hash);
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
+			block_hash: b1a_hash.clone(),
+			index: 0,
+		}));
+
+		// now let's make forked chain the canon chain
+		let mut batch = db.transaction();
+		let _ = bc.insert_block(&mut batch, &b2, vec![]);
+		bc.commit();
+		db.write(batch).unwrap();
+
+		// Transaction should be retracted
+		assert_eq!(bc.best_block_hash(), b2_hash);
+		assert_eq!(bc.transaction_address(&t1_hash), None);
+	}
+
 	#[test]
 	fn test_overwriting_transaction_addresses() {
 		let mut canon_chain = ChainGenerator::default();
@@ -1318,14 +1398,14 @@ mod tests {
 		db.write(batch).unwrap();
 
 		assert_eq!(bc.best_block_hash(), b1a_hash);
-		assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
 			block_hash: b1a_hash.clone(),
 			index: 0,
-		});
-		assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress {
+		}));
+		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
 			block_hash: b1a_hash.clone(),
 			index: 1,
-		});
+		}));
 
 		// now let's make forked chain the canon chain
 		let mut batch = db.transaction();
@@ -1334,18 +1414,18 @@ mod tests {
 		db.write(batch).unwrap();
 
 		assert_eq!(bc.best_block_hash(), b2_hash);
-		assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
 			block_hash: b1b_hash.clone(),
 			index: 1,
-		});
-		assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress {
+		}));
+		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
 			block_hash: b1b_hash.clone(),
 			index: 0,
-		});
-		assert_eq!(bc.transaction_address(&t3_hash).unwrap(), TransactionAddress {
+		}));
+		assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress {
 			block_hash: b2_hash.clone(),
 			index: 0,
-		});
+		}));
 	}
 
 	#[test]
```
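The reworked `prepare_transaction_addresses_update` marks transactions of retracted blocks as `None` and relies on a property of collecting duplicate keys into a `HashMap`: the entry seen last wins. Chaining `retracted` first therefore lets an enacted entry for the same hash override the removal, which is exactly what the in-code comment about ordering protects. A small demonstration with plain `std` types (the string payloads are illustrative):

```rust
use std::collections::HashMap;

fn main() {
    // Key 7 stands for a transaction hash that appears in a retracted
    // block *and* again in an enacted block of the new canon chain.
    let retracted = vec![(7u32, None)];
    let enacted = vec![(7u32, Some("block b1b, index 0")), (9, Some("block b2, index 0"))];

    // Collecting into a HashMap keeps the last value seen per key,
    // so listing `retracted` first lets enacted entries win.
    let update: HashMap<u32, Option<&str>> =
        retracted.into_iter().chain(enacted).collect();

    assert_eq!(update[&7], Some("block b1b, index 0")); // kept, not removed
    assert_eq!(update[&9], Some("block b2, index 0"));
}
```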
@@ -17,8 +17,8 @@ pub struct ExtrasUpdate<'a> {
|
||||
pub block_details: HashMap<H256, BlockDetails>,
|
||||
/// Modified block receipts.
|
||||
pub block_receipts: HashMap<H256, BlockReceipts>,
|
||||
/// Modified transaction addresses.
|
||||
pub transactions_addresses: HashMap<H256, TransactionAddress>,
|
||||
/// Modified blocks blooms.
|
||||
pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>,
|
||||
/// Modified transaction addresses (None signifies removed transactions).
|
||||
pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ use time::precise_time_ns;
|
||||
|
||||
// util
|
||||
use util::{journaldb, rlp, Bytes, View, PerfTimer, Itertools, Mutex, RwLock};
|
||||
use util::journaldb::JournalDB;
|
||||
use util::rlp::{UntrustedRlp};
|
||||
use util::numbers::*;
|
||||
use util::sha3::*;
|
||||
@@ -65,6 +64,7 @@ use evm::Factory as EvmFactory;
|
||||
use miner::{Miner, MinerService};
|
||||
use util::TrieFactory;
|
||||
use snapshot::{self, io as snapshot_io};
|
||||
use state_db::StateDB;
|
||||
|
||||
// re-export
|
||||
pub use types::blockchain_info::BlockChainInfo;
|
||||
@@ -124,7 +124,7 @@ pub struct Client {
|
||||
tracedb: Arc<TraceDB<BlockChain>>,
|
||||
engine: Arc<Engine>,
|
||||
db: Arc<Database>,
|
||||
state_db: Mutex<Box<JournalDB>>,
|
||||
state_db: Mutex<StateDB>,
|
||||
block_queue: BlockQueue,
|
||||
report: RwLock<ClientReport>,
|
||||
import_lock: Mutex<()>,
|
||||
@@ -154,8 +154,10 @@ pub const DB_COL_BODIES: Option<u32> = Some(2);
|
||||
pub const DB_COL_EXTRA: Option<u32> = Some(3);
|
||||
/// Column for Traces
|
||||
pub const DB_COL_TRACE: Option<u32> = Some(4);
|
||||
/// Column for Traces
|
||||
pub const DB_COL_ACCOUNT_BLOOM: Option<u32> = Some(5);
|
||||
/// Number of columns in DB
|
||||
pub const DB_NO_OF_COLUMNS: Option<u32> = Some(5);
|
||||
pub const DB_NO_OF_COLUMNS: Option<u32> = Some(6);
|
||||
|
||||
/// Append a path element to the given path and return the string.
|
||||
pub fn append_path<P>(path: P, item: &str) -> String where P: AsRef<Path> {
|
||||
@@ -184,14 +186,15 @@ impl Client {
        let chain = Arc::new(BlockChain::new(config.blockchain, &gb, db.clone()));
        let tracedb = Arc::new(try!(TraceDB::new(config.tracing, db.clone(), chain.clone())));

-       let mut state_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE);
-       if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) {
+       let journal_db = journaldb::new(db.clone(), config.pruning, DB_COL_STATE);
+       let mut state_db = StateDB::new(journal_db);
+       if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) {
            let batch = DBTransaction::new(&db);
            try!(state_db.commit(&batch, 0, &spec.genesis_header().hash(), None));
            try!(db.write(batch).map_err(ClientError::Database));
        }

-       if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.contains(h.state_root())) {
+       if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(h.state_root())) {
            warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
        }
@@ -301,7 +304,8 @@ impl Client {
        // Enact Verified Block
        let parent = chain_has_parent.unwrap();
        let last_hashes = self.build_last_hashes(header.parent_hash.clone());
-       let db = self.state_db.lock().boxed_clone();
+       let is_canon = header.parent_hash == self.chain.best_block_hash();
+       let db = if is_canon { self.state_db.lock().boxed_clone_canon() } else { self.state_db.lock().boxed_clone() };

        let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone());
        if let Err(e) = enact_result {
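The branch is the central caching decision of this change: only a block that extends the current best chain gets a clone that shares the canonical account cache, while a side-chain block gets a plain clone, so its state can never pollute cached canonical data. A toy model of the two clone flavours (names and structure assumed, not the real StateDB):

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    struct CacheModel {
        canon_cache: Arc<Mutex<HashMap<u64, u64>>>,
    }

    impl CacheModel {
        // side-chain clone: fresh, unshared cache
        fn boxed_clone(&self) -> CacheModel {
            CacheModel { canon_cache: Arc::new(Mutex::new(HashMap::new())) }
        }
        // canonical clone: shares the account cache with the parent
        fn boxed_clone_canon(&self) -> CacheModel {
            CacheModel { canon_cache: self.canon_cache.clone() }
        }
    }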
@@ -443,7 +447,8 @@ impl Client {
        // CHECK! I *think* this is fine, even if the state_root is equal to another
        // already-imported block of the same number.
        // TODO: Prove it with a test.
-       block.drain().commit(&batch, number, hash, ancient).expect("DB commit failed.");
+       let mut state = block.drain();
+       state.commit(&batch, number, hash, ancient).expect("DB commit failed.");

        let route = self.chain.insert_block(&batch, block_data, receipts);
        self.tracedb.import(&batch, TraceImportRequest {
@@ -456,7 +461,6 @@ impl Client {
        // Final commit to the DB
        self.db.write_buffered(batch).expect("DB write failed.");
        self.chain.commit();

        self.update_last_hashes(&parent, hash);
        route
    }
@@ -600,7 +604,7 @@ impl Client {
    /// Take a snapshot at the given block.
    /// If the ID given is "latest", this will default to 1000 blocks behind.
    pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockID, p: &snapshot::Progress) -> Result<(), ::error::Error> {
-       let db = self.state_db.lock().boxed_clone();
+       let db = self.state_db.lock().journal_db().boxed_clone();
        let best_block_number = self.chain_info().best_block_number;
        let block_number = try!(self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at)));
@@ -881,7 +885,7 @@ impl BlockChainClient for Client {
    }

    fn state_data(&self, hash: &H256) -> Option<Bytes> {
-       self.state_db.lock().state(hash)
+       self.state_db.lock().journal_db().state(hash)
    }

    fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
@@ -1080,6 +1084,8 @@ impl MiningBlockChainClient for Client {
        let number = block.header().number();

        let block_data = block.rlp_bytes();
+       // Clear canonical state cache
+       self.state_db.lock().clear_cache();
        let route = self.commit_block(block, &h, &block_data);
        trace!(target: "client", "Imported sealed block #{} ({})", number, h);
@@ -23,7 +23,8 @@ use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action};
use blockchain::TreeRoute;
use client::{
    BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID,
-   TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError
+   TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError,
+   DB_NO_OF_COLUMNS, DB_COL_STATE,
};
use header::{Header as BlockHeader, BlockNumber};
use filter::Filter;
@@ -40,6 +41,7 @@ use block::{OpenBlock, SealedBlock};
use executive::Executed;
use error::CallError;
use trace::LocalizedTrace;
+use state_db::StateDB;

/// Test client.
pub struct TestBlockChainClient {
@@ -247,13 +249,14 @@ impl TestBlockChainClient {
    }
}

-pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
+pub fn get_temp_state_db() -> GuardedTempResult<StateDB> {
    let temp = RandomTempPath::new();
-   let db = Database::open_default(temp.as_str()).unwrap();
-   let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None);
+   let db = Database::open(&DatabaseConfig::with_columns(DB_NO_OF_COLUMNS), temp.as_str()).unwrap();
+   let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, DB_COL_STATE);
+   let state_db = StateDB::new(journal_db);
    GuardedTempResult {
        _temp: temp,
-       result: Some(journal_db)
+       result: Some(state_db)
    }
}
@@ -261,9 +264,9 @@ impl MiningBlockChainClient for TestBlockChainClient {
    fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
        let engine = &*self.spec.engine;
        let genesis_header = self.spec.genesis_header();
-       let mut db_result = get_temp_journal_db();
+       let mut db_result = get_temp_state_db();
        let mut db = db_result.take();
-       self.spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+       self.spec.ensure_db_good(&mut db).unwrap();

        let last_hashes = vec![genesis_header.hash()];
        let mut open_block = OpenBlock::new(
@@ -348,11 +351,11 @@ impl BlockChainClient for TestBlockChainClient {
    }

    fn transaction(&self, _id: TransactionID) -> Option<LocalizedTransaction> {
-       unimplemented!();
+       None
    }

    fn uncle(&self, _id: UncleID) -> Option<Bytes> {
-       unimplemented!();
+       None
    }

    fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
@@ -64,6 +64,9 @@ pub trait Writable {
    /// Writes the value into the database.
    fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]>;

+   /// Deletes key from the database.
+   fn delete<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) where T: Encodable, R: Deref<Target = [u8]>;
+
    /// Writes the value into the database and updates the cache.
    fn write_with_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
        K: Key<T, Target = R> + Hash + Eq,
@@ -100,6 +103,34 @@ pub trait Writable {
            },
        }
    }

+   /// Writes and removes the values into the database and updates the cache.
+   fn extend_with_option_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
+       K: Key<T, Target = R> + Hash + Eq,
+       T: Encodable,
+       R: Deref<Target = [u8]> {
+       match policy {
+           CacheUpdatePolicy::Overwrite => {
+               for (key, value) in values.into_iter() {
+                   match value {
+                       Some(ref v) => self.write(col, &key, v),
+                       None => self.delete(col, &key),
+                   }
+                   cache.insert(key, value);
+               }
+           },
+           CacheUpdatePolicy::Remove => {
+               for (key, value) in values.into_iter() {
+                   match value {
+                       Some(v) => self.write(col, &key, &v),
+                       None => self.delete(col, &key),
+                   }
+                   cache.remove(&key);
+               }
+           },
+       }
+   }
}

/// Should be used to read values from database.
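A sketch of the intended call site in the blockchain write path (the cache field and column names here are assumptions for illustration, not code from this diff):

    // Flush the Option-valued address map gathered in ExtrasUpdate:
    // Some(..) entries are written, None entries are deleted, and the
    // in-memory cache is updated in the same pass.
    batch.extend_with_option_cache(
        DB_COL_EXTRA,
        &mut *self.transaction_addresses.write(),
        update.transactions_addresses,
        CacheUpdatePolicy::Overwrite,
    );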
@@ -154,6 +185,13 @@ impl Writable for DBTransaction {
            panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
        }
    }

+   fn delete<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) where T: Encodable, R: Deref<Target = [u8]> {
+       let result = DBTransaction::delete(self, col, &key.key());
+       if let Err(err) = result {
+           panic!("db delete failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
+       }
+   }
}

impl Readable for Database {
@@ -248,9 +248,9 @@ mod tests {
        let spec = new_test_authority();
        let engine = &*spec.engine;
        let genesis_header = spec.genesis_header();
-       let mut db_result = get_temp_journal_db();
+       let mut db_result = get_temp_state_db();
        let mut db = db_result.take();
-       spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+       spec.ensure_db_good(&mut db).unwrap();
        let last_hashes = Arc::new(vec![genesis_header.hash()]);
        let vm_factory = Default::default();
        let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
@@ -82,9 +82,9 @@ mod tests {
        let spec = new_test_instant();
        let engine = &*spec.engine;
        let genesis_header = spec.genesis_header();
-       let mut db_result = get_temp_journal_db();
+       let mut db_result = get_temp_state_db();
        let mut db = db_result.take();
-       spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+       spec.ensure_db_good(&mut db).unwrap();
        let last_hashes = Arc::new(vec![genesis_header.hash()]);
        let vm_factory = Default::default();
        let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
@@ -353,9 +353,9 @@ mod tests {
        let spec = new_morden();
        let engine = &*spec.engine;
        let genesis_header = spec.genesis_header();
-       let mut db_result = get_temp_journal_db();
+       let mut db_result = get_temp_state_db();
        let mut db = db_result.take();
-       spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+       spec.ensure_db_good(&mut db).unwrap();
        let last_hashes = Arc::new(vec![genesis_header.hash()]);
        let vm_factory = Default::default();
        let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
@@ -368,9 +368,9 @@ mod tests {
        let spec = new_morden();
        let engine = &*spec.engine;
        let genesis_header = spec.genesis_header();
-       let mut db_result = get_temp_journal_db();
+       let mut db_result = get_temp_state_db();
        let mut db = db_result.take();
-       spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+       spec.ensure_db_good(&mut db).unwrap();
        let last_hashes = Arc::new(vec![genesis_header.hash()]);
        let vm_factory = Default::default();
        let mut b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
@@ -65,9 +65,9 @@ mod tests {
        let spec = new_morden();
        let engine = &spec.engine;
        let genesis_header = spec.genesis_header();
-       let mut db_result = get_temp_journal_db();
+       let mut db_result = get_temp_state_db();
        let mut db = db_result.take();
-       spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
+       spec.ensure_db_good(&mut db).unwrap();
        let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce(), Default::default()).unwrap();
        assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64));
        assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64));
@@ -83,6 +83,9 @@ pub trait Ext {
    /// Returns code at given address
    fn extcode(&self, address: &Address) -> Bytes;

+   /// Returns code length in bytes at given address
+   fn extcode_len(&self, address: &Address) -> u64;
+
    /// Creates log entry with given topics and data
    fn log(&mut self, topics: Vec<H256>, data: &[u8]);
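The new accessor exists because `extcode` returns an owned `Bytes`: answering EXTCODESIZE through it copies an entire contract just to take its length. A toy model of the two paths (a `HashMap` standing in for the code store, not the real trait):

    use std::collections::HashMap;

    fn main() {
        let mut codes: HashMap<u64, Vec<u8>> = HashMap::new();
        codes.insert(1, vec![0u8; 24_576]);
        // old path: clone ~24 KB of code, then discard it after len()
        let via_copy = codes.get(&1).cloned().unwrap_or_default().len() as u64;
        // new path: read the length without cloning anything
        let direct = codes.get(&1).map_or(0, |c| c.len() as u64);
        assert_eq!(via_copy, direct);
    }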
@@ -53,6 +53,17 @@ fn color(instruction: Instruction, name: &'static str) -> String {
type CodePosition = usize;
type ProgramCounter = usize;

+const ONE: U256 = U256([1, 0, 0, 0]);
+const TWO: U256 = U256([2, 0, 0, 0]);
+const TWO_POW_5: U256 = U256([0x20, 0, 0, 0]);
+const TWO_POW_8: U256 = U256([0x100, 0, 0, 0]);
+const TWO_POW_16: U256 = U256([0x10000, 0, 0, 0]);
+const TWO_POW_24: U256 = U256([0x1000000, 0, 0, 0]);
+const TWO_POW_64: U256 = U256([0, 0x1, 0, 0]); // 0x1 00000000 00000000
+const TWO_POW_96: U256 = U256([0, 0x100000000, 0, 0]); // 0x1 followed by 12 zero bytes
+const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); // 0x1 followed by 28 zero bytes
+const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); // 0x1 followed by 31 zero bytes
+
/// Abstraction over raw vector of Bytes. Easier state management of PC.
struct CodeReader<'a> {
    position: ProgramCounter,
@@ -471,7 +482,7 @@ impl<Cost: CostType> Interpreter<Cost> {
        },
        instructions::EXTCODESIZE => {
            let address = u256_to_address(&stack.pop_back());
-           let len = ext.extcode(&address).len();
+           let len = ext.extcode_len(&address);
            stack.push(U256::from(len));
        },
        instructions::CALLDATACOPY => {
@@ -599,7 +610,19 @@ impl<Cost: CostType> Interpreter<Cost> {
        let a = stack.pop_back();
        let b = stack.pop_back();
        stack.push(if !self.is_zero(&b) {
-           a.overflowing_div(b).0
+           match b {
+               ONE => a,
+               TWO => a >> 1,
+               TWO_POW_5 => a >> 5,
+               TWO_POW_8 => a >> 8,
+               TWO_POW_16 => a >> 16,
+               TWO_POW_24 => a >> 24,
+               TWO_POW_64 => a >> 64,
+               TWO_POW_96 => a >> 96,
+               TWO_POW_224 => a >> 224,
+               TWO_POW_248 => a >> 248,
+               _ => a.overflowing_div(b).0,
+           }
        } else {
            U256::zero()
        });
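For unsigned values, dividing by a power of two is exactly a right shift, so the interpreter short-circuits the divisors contract code hits constantly (1, 2, 32, 256, ...) and only falls back to full 256-bit long division in the default arm. The equivalence the lookup table relies on, checked on native integers:

    // Division by 2^k equals a right shift by k for unsigned integers.
    fn main() {
        let a: u64 = 0xdead_beef;
        assert_eq!(a / 2, a >> 1);          // TWO
        assert_eq!(a / 32, a >> 5);         // TWO_POW_5
        assert_eq!(a / 256, a >> 8);        // TWO_POW_8
        assert_eq!(a / (1 << 24), a >> 24); // TWO_POW_24
    }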
@@ -18,6 +18,7 @@
use common::*;
use evmjit;
use evm::{self, GasLeft};
+use types::executed::CallType;

/// Should be used to convert jit types to ethcore
trait FromJit<T>: Sized {
@@ -77,10 +78,11 @@ impl IntoJit<evmjit::I256> for U256 {
impl IntoJit<evmjit::I256> for H256 {
    fn into_jit(self) -> evmjit::I256 {
        let mut ret = [0; 4];
-       for i in 0..self.bytes().len() {
-           let rev = self.bytes().len() - 1 - i;
+       let len = self.len();
+       for i in 0..len {
+           let rev = len - 1 - i;
            let pos = rev / 8;
-           ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8);
+           ret[pos] += (self[i] as u64) << ((rev % 8) * 8);
        }
        evmjit::I256 { words: ret }
    }
@@ -194,6 +196,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
        receive_address: *const evmjit::H256,
        code_address: *const evmjit::H256,
        transfer_value: *const evmjit::I256,
+       // Ignoring apparent value - it's handled correctly in executive.
        _apparent_value: *const evmjit::I256,
        in_beg: *const u8,
        in_size: u64,
@@ -207,10 +210,12 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
        let receive_address = unsafe { Address::from_jit(&*receive_address) };
        let code_address = unsafe { Address::from_jit(&*code_address) };
        let transfer_value = unsafe { U256::from_jit(&*transfer_value) };
-       let value = Some(transfer_value);

        // receive address and code address are the same in normal calls
        let is_callcode = receive_address != code_address;
+       let is_delegatecall = is_callcode && sender_address != receive_address;
+
+       let value = if is_delegatecall { None } else { Some(transfer_value) };

        if !is_callcode && !self.ext.exists(&code_address) {
            gas_cost = gas_cost + U256::from(self.ext.schedule().call_new_account_gas);
@@ -239,6 +244,12 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
            }
        }

+       let call_type = match (is_callcode, is_delegatecall) {
+           (_, true) => CallType::DelegateCall,
+           (true, false) => CallType::CallCode,
+           (false, false) => CallType::Call,
+       };
+
        match self.ext.call(
            &call_gas,
            &sender_address,
@@ -246,7 +257,9 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
            value,
            unsafe { slice::from_raw_parts(in_beg, in_size as usize) },
            &code_address,
-           unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) {
+           unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) },
+           call_type,
+       ) {
            evm::MessageCallResult::Success(gas_left) => unsafe {
                *io_gas = (gas + gas_left).low_u64();
                true
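The flag pair resolves to a call kind as follows: equal receive and code addresses mean a plain CALL; unequal addresses mean CALLCODE; and if the sender differs from the receiver too, it must be DELEGATECALL, which also explains why `value` becomes `None` in that case (a delegate call inherits the caller's value context instead of transferring anything). The mapping, checked in isolation:

    #[derive(Debug, PartialEq)]
    enum CallType { Call, CallCode, DelegateCall }

    fn call_type(is_callcode: bool, is_delegatecall: bool) -> CallType {
        match (is_callcode, is_delegatecall) {
            (_, true) => CallType::DelegateCall,
            (true, false) => CallType::CallCode,
            (false, false) => CallType::Call,
        }
    }

    fn main() {
        assert_eq!(call_type(false, false), CallType::Call);
        assert_eq!(call_type(true, false), CallType::CallCode);
        assert_eq!(call_type(true, true), CallType::DelegateCall);
    }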
@@ -140,6 +140,10 @@ impl Ext for FakeExt {
        self.codes.get(address).unwrap_or(&Bytes::new()).clone()
    }

+   fn extcode_len(&self, address: &Address) -> u64 {
+       self.codes.get(address).map_or(0, |c| c.len() as u64)
+   }
+
    fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
        self.logs.push(FakeLogEntry {
            topics: topics,
@@ -206,6 +206,10 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMTracer {
        self.state.code(address).unwrap_or_else(|| vec![])
    }

+   fn extcode_len(&self, address: &Address) -> u64 {
+       self.state.code_size(address).unwrap_or(0)
+   }
+
    #[cfg_attr(feature="dev", allow(match_ref_pats))]
    fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result<U256>
        where Self: Sized {
@@ -132,6 +132,10 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer {
        self.ext.extcode(address)
    }

+   fn extcode_len(&self, address: &Address) -> u64 {
+       self.ext.extcode_len(address)
+   }
+
    fn log(&mut self, topics: Vec<H256>, data: &[u8]) {
        self.ext.log(topics, data)
    }
@@ -100,8 +100,12 @@ extern crate ethcore_ipc_nano as nanoipc;
extern crate ethcore_devtools as devtools;
extern crate rand;
extern crate bit_set;
+extern crate lru_cache;
+extern crate bloomfilter;
+extern crate byteorder;

-#[cfg(feature = "jit" )] extern crate evmjit;
+#[cfg(feature = "jit" )]
+extern crate evmjit;

pub mod account_provider;
pub mod engines;
@@ -130,6 +134,7 @@ mod basic_types;
mod env_info;
mod pod_account;
mod state;
+mod state_db;
mod account;
mod account_db;
mod builtin;

ethcore/src/migrations/account_bloom.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Bloom upgrade

use client::{DB_COL_EXTRA, DB_COL_HEADERS, DB_NO_OF_COLUMNS, DB_COL_STATE, DB_COL_ACCOUNT_BLOOM};
use state_db::{ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET, StateDB, ACCOUNT_BLOOM_HASHCOUNT_KEY};
use util::trie::TrieDB;
use views::HeaderView;
use bloomfilter::Bloom;
use util::migration::Error;
use util::journaldb;
use util::{H256, FixedHash, BytesConvertable};
use util::{Database, DatabaseConfig, DBTransaction, CompactionProfile};
use std::path::Path;

fn check_bloom_exists(db: &Database) -> bool {
    let hash_count_entry = db.get(DB_COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
        .expect("Low-level database error");

    hash_count_entry.is_some()
}

/// Account bloom upgrade routine. If bloom already present, does nothing.
/// If database empty (no best block), does nothing.
/// Can be called on upgraded database with no issues (will do nothing).
pub fn upgrade_account_bloom(db_path: &Path) -> Result<(), Error> {
    let path = try!(db_path.to_str().ok_or(Error::MigrationImpossible));
    trace!(target: "migration", "Account bloom upgrade at {:?}", db_path);

    let source = try!(Database::open(&DatabaseConfig {
        max_open_files: 64,
        cache_size: None,
        compaction: CompactionProfile::default(),
        columns: DB_NO_OF_COLUMNS,
        wal: true,
    }, path));

    let best_block_hash = match try!(source.get(DB_COL_EXTRA, b"best")) {
        // no migration needed
        None => {
            trace!(target: "migration", "No best block hash, skipping");
            return Ok(());
        },
        Some(hash) => hash,
    };
    let best_block_header = match try!(source.get(DB_COL_HEADERS, &best_block_hash)) {
        // no best block, nothing to do
        None => {
            trace!(target: "migration", "No best block header, skipping");
            return Ok(())
        },
        Some(x) => x,
    };
    let state_root = HeaderView::new(&best_block_header).state_root();

    if check_bloom_exists(&source) {
        // bloom already exists, nothing to do
        trace!(target: "migration", "Bloom already present, skipping");
        return Ok(())
    }

    println!("Adding accounts bloom (one-time upgrade)");
    let db = ::std::sync::Arc::new(source);
    let bloom_journal = {
        let mut bloom = Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET);
        // no difference what algorithm is passed, since there will be no writes
        let state_db = journaldb::new(
            db.clone(),
            journaldb::Algorithm::OverlayRecent,
            DB_COL_STATE);
        let account_trie = try!(TrieDB::new(state_db.as_hashdb(), &state_root).map_err(|e| Error::Custom(format!("Cannot open trie: {:?}", e))));
        for (ref account_key, _) in account_trie.iter() {
            let account_key_hash = H256::from_slice(&account_key);
            bloom.set(account_key_hash.as_slice());
        }

        bloom.drain_journal()
    };

    trace!(target: "migration", "Generated {} bloom updates", bloom_journal.entries.len());

    let batch = DBTransaction::new(&db);
    try!(StateDB::commit_bloom(&batch, bloom_journal).map_err(|_| Error::Custom("Failed to commit bloom".to_owned())));
    try!(db.write(batch));

    trace!(target: "migration", "Finished bloom update");

    Ok(())
}
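What the journal buys is a negative cache: once every existing account's key hash has been set, a bloom miss later proves an account is absent and the state-trie lookup can be skipped entirely; only hits (including false positives) pay for a full trie query. A hedged sketch of the read side (`check` as the membership probe is an assumption about the bloom API, mirroring the `set` used above):

    // Sketch of the intended fast path.
    fn account_may_exist(bloom: &Bloom, account_key_hash: &H256) -> bool {
        // false is definitive (skip the trie); true still needs a trie lookup
        bloom.check(account_key_hash.as_slice())
    }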
@@ -23,3 +23,9 @@ pub mod extras;
mod v9;
pub use self::v9::ToV9;
pub use self::v9::Extract;

+mod account_bloom;
+pub use self::account_bloom::upgrade_account_bloom;
+
+mod v10;
+pub use self::v10::ToV10;
ethcore/src/migrations/v10.rs (new file, 35 lines)
@@ -0,0 +1,35 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! This migration compresses the state db.

use util::migration::SimpleMigration;

/// Compressing migration.
#[derive(Default)]
pub struct ToV10;

impl SimpleMigration for ToV10 {
    fn version(&self) -> u32 {
        10
    }

    fn columns(&self) -> Option<u32> { Some(6) }

    fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
        Some((key, value))
    }
}
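Returning the pair unchanged is not a no-op: a `SimpleMigration` rewrites every entry into a fresh database, and that rewrite is what applies the new state compression; bumping the version to 10 merely records it. For contrast, a hypothetical migration that actually transforms entries would differ only in the body (illustrative, and assuming `None` omits the entry, as the `Option` return type suggests):

    /// Hypothetical example: drop zero-length values while migrating.
    #[derive(Default)]
    pub struct DropEmpty;

    impl SimpleMigration for DropEmpty {
        fn version(&self) -> u32 { 11 }
        fn columns(&self) -> Option<u32> { Some(6) }
        fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
            if value.is_empty() { None } else { Some((key, value)) }
        }
    }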
@@ -286,6 +286,7 @@ impl Miner {
        };

        let mut invalid_transactions = HashSet::new();
+       let mut transactions_to_penalize = HashSet::new();
        let block_number = open_block.block().fields().header.number();
        // TODO: push new uncles, too.
        for tx in transactions {
@@ -293,6 +294,12 @@ impl Miner {
        match open_block.push_transaction(tx, None) {
            Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas })) => {
                debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas);

+               // Penalize transaction if it's above current gas limit
+               if gas > gas_limit {
+                   transactions_to_penalize.insert(hash);
+               }
+
                // Exit early if gas left is smaller than min_tx_gas
                let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly.
                if gas_limit - gas_used < min_tx_gas {
@@ -328,6 +335,9 @@ impl Miner {
            for hash in invalid_transactions.into_iter() {
                queue.remove_invalid(&hash, &fetch_account);
            }
+           for hash in transactions_to_penalize {
+               queue.penalize(&hash);
+           }
        }

        if !block.transactions().is_empty() {
@@ -869,7 +879,7 @@ impl MinerService for Miner {
        out_of_chain.for_each(|txs| {
            let mut transaction_queue = self.transaction_queue.lock();
            let _ = self.add_transactions_to_queue(
-               chain, txs, TransactionOrigin::External, &mut transaction_queue
+               chain, txs, TransactionOrigin::RetractedBlock, &mut transaction_queue
            );
        });
    }
@@ -99,6 +99,8 @@ pub enum TransactionOrigin {
    Local,
    /// External transaction received from network
    External,
+   /// Transactions from retracted blocks
+   RetractedBlock,
}

impl PartialOrd for TransactionOrigin {
@@ -113,10 +115,11 @@ impl Ord for TransactionOrigin {
            return Ordering::Equal;
        }

-       if *self == TransactionOrigin::Local {
-           Ordering::Less
-       } else {
-           Ordering::Greater
+       match (*self, *other) {
+           (TransactionOrigin::RetractedBlock, _) => Ordering::Less,
+           (_, TransactionOrigin::RetractedBlock) => Ordering::Greater,
+           (TransactionOrigin::Local, _) => Ordering::Less,
+           _ => Ordering::Greater,
        }
    }
}
@@ -131,10 +134,15 @@ struct TransactionOrder {
    /// Gas Price of the transaction.
    /// Low gas price = Low priority (processed later)
    gas_price: U256,
+   /// Gas (limit) of the transaction.
+   /// Low gas limit = High priority (processed earlier)
+   gas: U256,
    /// Hash to identify associated transaction
    hash: H256,
    /// Origin of the transaction
    origin: TransactionOrigin,
+   /// Penalties
+   penalties: usize,
}
@@ -143,8 +151,10 @@ impl TransactionOrder {
        TransactionOrder {
            nonce_height: tx.nonce() - base_nonce,
            gas_price: tx.transaction.gas_price,
+           gas: tx.transaction.gas,
            hash: tx.hash(),
            origin: tx.origin,
+           penalties: 0,
        }
    }
@@ -152,6 +162,11 @@ impl TransactionOrder {
        self.nonce_height = nonce - base_nonce;
        self
    }

+   fn penalize(mut self) -> Self {
+       self.penalties = self.penalties.saturating_add(1);
+       self
+   }
}

impl Eq for TransactionOrder {}
@@ -168,6 +183,11 @@ impl PartialOrd for TransactionOrder {

impl Ord for TransactionOrder {
    fn cmp(&self, b: &TransactionOrder) -> Ordering {
+       // First check number of penalties
+       if self.penalties != b.penalties {
+           return self.penalties.cmp(&b.penalties);
+       }
+
        // Then check nonce_height
        if self.nonce_height != b.nonce_height {
            return self.nonce_height.cmp(&b.nonce_height);
@@ -179,11 +199,18 @@ impl Ord for TransactionOrder {
            return self.origin.cmp(&b.origin);
        }

-       // Then compare gas_prices
-       let a_gas = self.gas_price;
-       let b_gas = b.gas_price;
+       // Then compare gas usage
+       let a_gas = self.gas;
+       let b_gas = b.gas;
        if a_gas != b_gas {
-           return b_gas.cmp(&a_gas);
+           return a_gas.cmp(&b_gas);
        }

+       // Then compare gas_prices
+       let a_gas_price = self.gas_price;
+       let b_gas_price = b.gas_price;
+       if a_gas_price != b_gas_price {
+           return b_gas_price.cmp(&a_gas_price);
+       }
+
        // Compare hashes
@@ -321,7 +348,7 @@ pub struct AccountDetails {


/// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
-const GAS_LIMIT_HYSTERESIS: usize = 10; // %
+const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) %

/// `TransactionQueue` implementation
pub struct TransactionQueue {
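The rewritten comment pins down the arithmetic: the hysteresis constant is a divisor, so 10 means a tolerance of 100/10 = 10%, i.e. a transaction is still accepted while `gas <= gas_limit + gas_limit / 10`. A minimal check of that reading:

    // With GAS_LIMIT_HYSTERESIS = 10, a 10% overshoot is tolerated.
    fn main() {
        const GAS_LIMIT_HYSTERESIS: u64 = 10;
        let within = |gas: u64, limit: u64| gas <= limit + limit / GAS_LIMIT_HYSTERESIS;
        assert!(within(1_100, 1_000));
        assert!(!within(1_101, 1_000));
    }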
@@ -504,6 +531,39 @@ impl TransactionQueue {
        assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len());
    }

+   /// Penalize transactions from sender of transaction with given hash.
+   /// I.e. it should change the priority of the transaction in the queue.
+   ///
+   /// NOTE: We need to penalize all transactions from particular sender
+   /// to avoid breaking invariants in queue (ordered by nonces).
+   /// Consecutive transactions from this sender would fail otherwise (because of invalid nonce).
+   pub fn penalize(&mut self, transaction_hash: &H256) {
+       let transaction = match self.by_hash.get(transaction_hash) {
+           None => return,
+           Some(t) => t,
+       };
+       let sender = transaction.sender();
+
+       // Penalize all transactions from this sender
+       let nonces_from_sender = match self.current.by_address.row(&sender) {
+           Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
+           None => vec![],
+       };
+       for k in nonces_from_sender {
+           let order = self.current.drop(&sender, &k).unwrap();
+           self.current.insert(sender, k, order.penalize());
+       }
+       // Same thing for future
+       let nonces_from_sender = match self.future.by_address.row(&sender) {
+           Some(row_map) => row_map.keys().cloned().collect::<Vec<U256>>(),
+           None => vec![],
+       };
+       for k in nonces_from_sender {
+           let order = self.future.drop(&sender, &k).unwrap();
+           self.future.insert(sender, k, order.penalize());
+       }
+   }
+
    /// Removes invalid transaction identified by hash from queue.
    /// Assumption is that this transaction nonce is not related to client nonce,
    /// so transactions left in queue are processed according to client nonce.
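The NOTE in the doc comment is the design constraint: penalties are compared before nonce heights in `Ord for TransactionOrder`, so demoting only one of a sender's transactions could let a later-nonce sibling be scheduled first and fail with an invalid nonce. Bumping every order from the sender demotes the group while preserving its internal nonce order, as a toy sort shows:

    fn main() {
        // (penalties, nonce_height): sorted ascending, penalties first.
        let mut orders = vec![(1usize, 1u64), (0, 0), (1, 0)];
        orders.sort();
        // the unpenalized sender runs first; the penalized group keeps nonce order
        assert_eq!(orders, vec![(0, 0), (1, 0), (1, 1)]);
    }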
@@ -577,7 +637,10 @@ impl TransactionQueue {
            // Goes to future or is removed
            let order = self.current.drop(sender, &k).unwrap();
            if k >= current_nonce {
-               self.future.insert(*sender, k, order.update_height(k, current_nonce));
+               let order = order.update_height(k, current_nonce);
+               if let Some(old) = self.future.insert(*sender, k, order.clone()) {
+                   Self::replace_orders(*sender, k, old, order, &mut self.future, &mut self.by_hash);
+               }
            } else {
                trace!(target: "txqueue", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
                self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
@@ -641,7 +704,9 @@ impl TransactionQueue {
            self.future.by_priority.remove(&order);
            // Put to current
            let order = order.update_height(current_nonce, first_nonce);
-           self.current.insert(address, current_nonce, order);
+           if let Some(old) = self.current.insert(address, current_nonce, order.clone()) {
+               Self::replace_orders(address, current_nonce, old, order, &mut self.current, &mut self.by_hash);
+           }
            update_last_nonce_to = Some(current_nonce);
            current_nonce = current_nonce + U256::one();
        }
@@ -673,45 +738,51 @@ impl TransactionQueue {

        let address = tx.sender();
        let nonce = tx.nonce();

-       let next_nonce = self.last_nonces
-           .get(&address)
-           .cloned()
-           .map_or(state_nonce, |n| n + U256::one());
+       let hash = tx.hash();

        // The transaction might be old, let's check that.
        // This has to be the first test, otherwise calculating
        // nonce height would result in overflow.
        if nonce < state_nonce {
-           // Dropping transaction
-           trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
+           trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", hash, nonce, state_nonce);
            return Err(TransactionError::Old);
-       } else if nonce > next_nonce {
+       }
+
+       // Update nonces of transactions in future (remove old transactions)
+       self.update_future(&address, state_nonce);
+       // State nonce could be updated. Maybe there are some more items waiting in future?
+       self.move_matching_future_to_current(address, state_nonce, state_nonce);
+       // Check the next expected nonce (might be updated by move above)
+       let next_nonce = self.last_nonces
+           .get(&address)
+           .cloned()
+           .map_or(state_nonce, |n| n + U256::one());
+
+       // Future transaction
+       if nonce > next_nonce {
            // We have a gap - put to future.
-           // Update nonces of transactions in future (remove old transactions)
-           self.update_future(&address, state_nonce);
            // Insert transaction (or replace old one with lower gas price)
            try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash)));
-           // Return an error if this transaction is not imported because of limit.
-           try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
+           // Enforce limit in Future
+           let removed = self.future.enforce_limit(&mut self.by_hash);
+           // Return an error if this transaction was not imported because of limit.
+           try!(check_if_removed(&address, &nonce, removed));

+           debug!(target: "txqueue", "Importing transaction to future: {:?}", hash);
+           debug!(target: "txqueue", "status: {:?}", self.status());
            return Ok(TransactionImportResult::Future);
        }

        // We might have filled a gap - move some more transactions from future
-       self.move_matching_future_to_current(address, nonce, state_nonce);
+       self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce);

        // Replace transaction if any
        try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
        // Keep track of highest nonce stored in current
        let new_max = self.last_nonces.get(&address).map_or(nonce, |n| cmp::max(nonce, *n));
        self.last_nonces.insert(address, new_max);
-       // Update nonces of transactions in future
-       self.update_future(&address, state_nonce);
-       // Maybe there are some more items waiting in future?
-       self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce);
+       // There might be exactly the same transaction waiting in future
+       // (same sender and nonce), but the function above would not move it.
+       if let Some(order) = self.future.drop(&address, &nonce) {
+           // Let's insert that transaction to current (if it has higher gas_price)
+           let future_tx = self.by_hash.remove(&order.hash).expect("All transactions in `future` are always in `by_hash`.");
+           // If the transaction in `current` (the one we are importing) is replaced, it means it had too low a gas_price.
+           try!(check_too_cheap(!Self::replace_transaction(future_tx, state_nonce, &mut self.current, &mut self.by_hash)));
+       }

        // Also enforce the limit
        let removed = self.current.enforce_limit(&mut self.by_hash);
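Relative to the old body, the rewritten flow front-loads the housekeeping: prune stale future entries, promote whatever became ready, and only then derive the expected nonce, so the gap check cannot be confused by transactions that were about to move out of `future`. The decision ladder, condensed (plain integers; `next_nonce` is assumed to be computed after promotion, as above):

    fn classify(nonce: u64, state_nonce: u64, next_nonce: u64) -> &'static str {
        if nonce < state_nonce { return "Err(Old)"; }
        if nonce > next_nonce { return "Ok(Future)"; }
        "Ok(Current)"
    }

    fn main() {
        assert_eq!(classify(3, 5, 6), "Err(Old)");
        assert_eq!(classify(9, 5, 6), "Ok(Future)");
        assert_eq!(classify(6, 5, 6), "Ok(Current)");
    }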
@@ -755,24 +826,28 @@ impl TransactionQueue {

        if let Some(old) = set.insert(address, nonce, order.clone()) {
-           // There was already transaction in queue. Let's check which one should stay
-           let old_fee = old.gas_price;
-           let new_fee = order.gas_price;
-           if old_fee.cmp(&new_fee) == Ordering::Greater {
-               // Put back old transaction since it has greater priority (higher gas_price)
-               set.insert(address, nonce, old);
-               // and remove new one
-               by_hash.remove(&hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
-               false
-           } else {
-               // Make sure we remove old transaction entirely
-               by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
-               true
-           }
+           Self::replace_orders(address, nonce, old, order, set, by_hash)
        } else {
            true
        }
    }

+   fn replace_orders(address: Address, nonce: U256, old: TransactionOrder, order: TransactionOrder, set: &mut TransactionSet, by_hash: &mut HashMap<H256, VerifiedTransaction>) -> bool {
+       // There was already transaction in queue. Let's check which one should stay
+       let old_fee = old.gas_price;
+       let new_fee = order.gas_price;
+       if old_fee.cmp(&new_fee) == Ordering::Greater {
+           // Put back old transaction since it has greater priority (higher gas_price)
+           set.insert(address, nonce, old);
+           // and remove new one
+           by_hash.remove(&order.hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
+           false
+       } else {
+           // Make sure we remove old transaction entirely
+           by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
+           true
+       }
+   }
}

fn check_too_cheap(is_in: bool) -> Result<(), TransactionError> {
@@ -814,25 +889,40 @@ mod test {
        }
    }

-   fn new_unsigned_tx(nonce: U256) -> Transaction {
+   fn default_nonce_val() -> U256 {
+       123.into()
+   }
+
+   fn default_gas_val() -> U256 {
+       100_000.into()
+   }
+
+   fn default_gas_price_val() -> U256 {
+       1.into()
+   }
+
+   fn new_unsigned_tx_with_gas(nonce: U256, gas: U256, gas_price: U256) -> Transaction {
        Transaction {
            action: Action::Create,
            value: U256::from(100),
            data: "3331600055".from_hex().unwrap(),
-           gas: U256::from(100_000),
-           gas_price: U256::one(),
+           gas: gas,
+           gas_price: gas_price,
            nonce: nonce
        }
    }

-   fn new_tx() -> SignedTransaction {
-       let keypair = KeyPair::create().unwrap();
-       new_unsigned_tx(U256::from(123)).sign(keypair.secret())
-   }
-
-   fn default_nonce_val() -> U256 {
-       U256::from(123)
-   }
+   fn new_unsigned_tx(nonce: U256) -> Transaction {
+       new_unsigned_tx_with_gas(nonce, default_gas_val(), default_gas_price_val())
+   }
+
+   fn new_tx_with_gas(gas: U256, gas_price: U256) -> SignedTransaction {
+       let keypair = KeyPair::create().unwrap();
+       new_unsigned_tx_with_gas(default_nonce_val(), gas, gas_price).sign(keypair.secret())
+   }
+
+   fn new_tx() -> SignedTransaction {
+       new_tx_with_gas(default_gas_val(), default_gas_price_val())
+   }

    fn default_nonce(_address: &Address) -> AccountDetails {
@@ -846,10 +936,10 @@ mod test {
    fn new_similar_txs() -> (SignedTransaction, SignedTransaction) {
        let keypair = KeyPair::create().unwrap();
        let secret = &keypair.secret();
-       let nonce = U256::from(123);
+       let nonce = default_nonce_val();
        let tx = new_unsigned_tx(nonce);
        let mut tx2 = new_unsigned_tx(nonce);
-       tx2.gas_price = U256::from(2);
+       tx2.gas_price = 2.into();

        (tx.sign(secret), tx2.sign(secret))
    }
@@ -858,6 +948,18 @@ mod test {
        new_txs_with_gas_price_diff(second_nonce, U256::zero())
    }

+   fn new_txs_with_higher_gas_price(gas_price: U256) -> (SignedTransaction, SignedTransaction) {
+       let keypair = KeyPair::create().unwrap();
+       let secret = &keypair.secret();
+       let nonce = U256::from(123);
+       let mut tx = new_unsigned_tx(nonce);
+       tx.gas_price = tx.gas_price + gas_price;
+       let mut tx2 = new_unsigned_tx(nonce + 1.into());
+       tx2.gas_price = tx2.gas_price + gas_price;
+
+       (tx.sign(secret), tx2.sign(secret))
+   }
+
    fn new_txs_with_gas_price_diff(second_nonce: U256, gas_price: U256) -> (SignedTransaction, SignedTransaction) {
        let keypair = KeyPair::create().unwrap();
        let secret = &keypair.secret();
@@ -869,6 +971,17 @@ mod test {
        (tx.sign(secret), tx2.sign(secret))
    }

+   #[test]
+   fn test_ordering() {
+       assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::External), Ordering::Less);
+       assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::Local), Ordering::Less);
+       assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::External), Ordering::Less);
+
+       assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::Local), Ordering::Greater);
+       assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater);
+       assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater);
+   }
+
    #[test]
    fn should_create_transaction_set() {
        // given
@@ -972,6 +1085,31 @@ mod test {
        assert_eq!(txq.top_transactions()[0], tx2);
    }

+   #[test]
+   fn should_move_all_transactions_from_future() {
+       // given
+       let mut txq = TransactionQueue::new();
+       let (tx, tx2) = new_txs_with_gas_price_diff(1.into(), 1.into());
+       let prev_nonce = |a: &Address| AccountDetails { nonce: default_nonce(a).nonce - U256::one(), balance: !U256::zero() };
+
+       // First insert one transaction to future
+       let res = txq.add(tx.clone(), &prev_nonce, TransactionOrigin::External);
+       assert_eq!(res.unwrap(), TransactionImportResult::Future);
+       assert_eq!(txq.status().future, 1);
+
+       // now import second transaction to current
+       let res = txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External);
+
+       // then
+       assert_eq!(res.unwrap(), TransactionImportResult::Current);
+       assert_eq!(txq.status().pending, 2);
+       assert_eq!(txq.status().future, 0);
+       assert_eq!(txq.current.by_priority.len(), 2);
+       assert_eq!(txq.current.by_address.len(), 2);
+       assert_eq!(txq.top_transactions()[0], tx);
+       assert_eq!(txq.top_transactions()[1], tx2);
+   }
+
    #[test]
    fn should_import_tx() {
@@ -988,6 +1126,39 @@ mod test {
        assert_eq!(stats.pending, 1);
    }

+   #[test]
+   fn should_order_by_gas() {
+       // given
+       let mut txq = TransactionQueue::new();
+       let tx1 = new_tx_with_gas(50000.into(), 40.into());
+       let tx2 = new_tx_with_gas(40000.into(), 30.into());
+       let tx3 = new_tx_with_gas(30000.into(), 10.into());
+       let tx4 = new_tx_with_gas(50000.into(), 20.into());
+       txq.set_minimal_gas_price(15.into());
+
+       // when
+       let res1 = txq.add(tx1, &default_nonce, TransactionOrigin::External);
+       let res2 = txq.add(tx2, &default_nonce, TransactionOrigin::External);
+       let res3 = txq.add(tx3, &default_nonce, TransactionOrigin::External);
+       let res4 = txq.add(tx4, &default_nonce, TransactionOrigin::External);
+
+       // then
+       assert_eq!(res1.unwrap(), TransactionImportResult::Current);
+       assert_eq!(res2.unwrap(), TransactionImportResult::Current);
+       assert_eq!(unwrap_tx_err(res3), TransactionError::InsufficientGasPrice {
+           minimal: U256::from(15),
+           got: U256::from(10),
+       });
+       assert_eq!(res4.unwrap(), TransactionImportResult::Current);
+       let stats = txq.status();
+       assert_eq!(stats.pending, 3);
+       assert_eq!(txq.top_transactions()[0].gas, 40000.into());
+       assert_eq!(txq.top_transactions()[1].gas, 50000.into());
+       assert_eq!(txq.top_transactions()[2].gas, 50000.into());
+       assert_eq!(txq.top_transactions()[1].gas_price, 40.into());
+       assert_eq!(txq.top_transactions()[2].gas_price, 20.into());
+   }
+
    #[test]
    fn gas_limit_should_never_overflow() {
        // given
@@ -1024,7 +1195,6 @@ mod test {
        assert_eq!(stats.future, 0);
    }

-
    #[test]
    fn should_drop_transactions_from_senders_without_balance() {
        // given
@@ -1086,7 +1256,7 @@ mod test {
    }

    #[test]
-   fn should_reject_incorectly_signed_transaction() {
+   fn should_reject_incorrectly_signed_transaction() {
        // given
        let mut txq = TransactionQueue::new();
        let tx = new_unsigned_tx(U256::from(123));
@@ -1149,6 +1319,27 @@ mod test {
        assert_eq!(top.len(), 2);
    }

+   #[test]
+   fn should_prioritize_reimported_transactions_within_same_nonce_height() {
+       // given
+       let mut txq = TransactionQueue::new();
+       let tx = new_tx();
+       // the second one has the same nonce but a higher `gas_price`
+       let (_, tx2) = new_similar_txs();
+
+       // when
+       // first insert the local one with the higher gas price
+       txq.add(tx2.clone(), &default_nonce, TransactionOrigin::Local).unwrap();
+       // then the one with the lower gas price, but from a retracted block
+       txq.add(tx.clone(), &default_nonce, TransactionOrigin::RetractedBlock).unwrap();
+
+       // then
+       let top = txq.top_transactions();
+       assert_eq!(top[0], tx); // retracted should be first
+       assert_eq!(top[1], tx2);
+       assert_eq!(top.len(), 2);
+   }
+
    #[test]
    fn should_not_prioritize_local_transactions_with_different_nonce_height() {
        // given
@@ -1166,6 +1357,39 @@ mod test {
        assert_eq!(top.len(), 2);
    }

+   #[test]
+   fn should_penalize_transactions_from_sender() {
+       // given
+       let mut txq = TransactionQueue::new();
+       // txa, txb - slightly bigger gas price to have consistent ordering
+       let (txa, txb) = new_txs(U256::from(1));
+       let (tx1, tx2) = new_txs_with_higher_gas_price(U256::from(3));
+
+       // insert everything
+       txq.add(txa.clone(), &default_nonce, TransactionOrigin::External).unwrap();
+       txq.add(txb.clone(), &default_nonce, TransactionOrigin::External).unwrap();
+       txq.add(tx1.clone(), &default_nonce, TransactionOrigin::External).unwrap();
+       txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap();
+
+       let top = txq.top_transactions();
+       assert_eq!(top[0], tx1);
+       assert_eq!(top[1], txa);
+       assert_eq!(top[2], tx2);
+       assert_eq!(top[3], txb);
+       assert_eq!(top.len(), 4);
+
+       // when
+       txq.penalize(&tx1.hash());
+
+       // then
+       let top = txq.top_transactions();
+       assert_eq!(top[0], txa);
+       assert_eq!(top[1], txb);
+       assert_eq!(top[2], tx1);
+       assert_eq!(top[3], tx2);
+       assert_eq!(top.len(), 4);
+   }
+
    #[test]
    fn should_return_pending_hashes() {
        // given
@@ -47,7 +47,7 @@ impl PodAccount {
        PodAccount {
            balance: *acc.balance(),
            nonce: *acc.nonce(),
-           storage: acc.storage_overlay().iter().fold(BTreeMap::new(), |mut m, (k, &(_, ref v))| {m.insert(k.clone(), v.clone()); m}),
+           storage: acc.storage_changes().iter().fold(BTreeMap::new(), |mut m, (k, v)| {m.insert(k.clone(), v.clone()); m}),
            code: acc.code().map(|x| x.to_vec()),
        }
    }
@@ -143,7 +143,7 @@ impl Account {
#[cfg(test)]
mod tests {
    use account_db::{AccountDB, AccountDBMut};
-   use tests::helpers::get_temp_journal_db;
+   use tests::helpers::get_temp_state_db;
    use snapshot::tests::helpers::fill_storage;

    use util::{SHA3_NULL_RLP, SHA3_EMPTY};
@@ -154,8 +154,7 @@ mod tests {

    #[test]
    fn encoding_basic() {
-       let mut db = get_temp_journal_db();
-       let mut db = &mut **db;
+       let mut db = get_temp_state_db();
        let addr = Address::random();

        let account = Account {
@@ -175,8 +174,7 @@ mod tests {

    #[test]
    fn encoding_storage() {
-       let mut db = get_temp_journal_db();
-       let mut db = &mut **db;
+       let mut db = get_temp_state_db();
        let addr = Address::random();

        let account = {
@@ -198,4 +196,4 @@ mod tests {
        let fat_rlp = UntrustedRlp::new(&fat_rlp);
        assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp).unwrap(), account);
        }
    }
}
@@ -25,8 +25,9 @@ use blockchain::{BlockChain, BlockProvider};
use engines::Engine;
use ids::BlockID;
use views::BlockView;
+use super::state_db::StateDB;

-use util::{Bytes, Hashable, HashDB, snappy, TrieDB, TrieDBMut, TrieMut};
+use util::{Bytes, Hashable, HashDB, snappy, TrieDB, TrieDBMut, TrieMut, BytesConvertable};
use util::Mutex;
use util::hash::{FixedHash, H256};
use util::journaldb::{self, Algorithm, JournalDB};
@@ -453,7 +454,7 @@ impl StateRebuilder {
            Ok::<_, ::error::Error>(())
        }));

+       let mut bloom = StateDB::load_bloom(&backing);
        // batch trie writes
        {
            let mut account_trie = if self.state_root != SHA3_NULL_RLP {
@@ -463,11 +464,14 @@ impl StateRebuilder {
            };

            for (hash, thin_rlp) in pairs {
+               bloom.set(hash.as_slice());
                try!(account_trie.insert(&hash, &thin_rlp));
            }
        }

+       let bloom_journal = bloom.drain_journal();
        let batch = backing.transaction();
+       try!(StateDB::commit_bloom(&batch, bloom_journal));
        try!(self.db.inject(&batch));
        try!(backing.write(batch).map_err(::util::UtilError::SimpleString));
        trace!(target: "snapshot", "current state root: {:?}", self.state_root);
@@ -20,6 +20,7 @@ use common::*;
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority};
use pod_state::*;
use account_db::*;
+use state_db::StateDB;
use super::genesis::Genesis;
use super::seal::Generic as GenericSeal;
use ethereum;
@@ -229,19 +230,20 @@ impl Spec {
    }

    /// Ensure that the given state DB has the trie nodes in for the genesis state.
-   pub fn ensure_db_good(&self, db: &mut HashDB) -> Result<bool, Box<TrieError>> {
-       if !db.contains(&self.state_root()) {
+   pub fn ensure_db_good(&self, db: &mut StateDB) -> Result<bool, Box<TrieError>> {
+       if !db.as_hashdb().contains(&self.state_root()) {
            let mut root = H256::new();
            {
-               let mut t = SecTrieDBMut::new(db, &mut root);
+               let mut t = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
                for (address, account) in self.genesis_state.get().iter() {
                    try!(t.insert(address.as_slice(), &account.rlp()));
                }
            }
            for (address, account) in self.genesis_state.get().iter() {
-               account.insert_additional(&mut AccountDBMut::new(db, address));
+               db.note_account_bloom(address);
+               account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address));
            }
-           assert!(db.contains(&self.state_root()));
+           assert!(db.as_hashdb().contains(&self.state_root()));
            Ok(true)
        } else { Ok(false) }
    }
@@ -25,6 +25,7 @@ use trace::FlatTrace;
use pod_account::*;
use pod_state::{self, PodState};
use types::state_diff::StateDiff;
+use state_db::StateDB;

/// Used to return information about an `State::apply` operation.
pub struct ApplyOutcome {
@@ -37,23 +38,92 @@ pub struct ApplyOutcome {
/// Result type for the execution ("application") of a transaction.
pub type ApplyResult = Result<ApplyOutcome, Error>;

+#[derive(Debug)]
+enum AccountEntry {
+   /// Contains account data.
+   Cached(Account),
+   /// Account has been deleted.
+   Killed,
+   /// Account does not exist.
+   Missing,
+}
+
+impl AccountEntry {
+   fn is_dirty(&self) -> bool {
+       match *self {
+           AccountEntry::Cached(ref a) => a.is_dirty(),
+           AccountEntry::Killed => true,
+           AccountEntry::Missing => false,
+       }
+   }
+
+   /// Clone dirty data into new `AccountEntry`.
+   /// Returns None if clean.
+   fn clone_dirty(&self) -> Option<AccountEntry> {
+       match *self {
+           AccountEntry::Cached(ref acc) if acc.is_dirty() => Some(AccountEntry::Cached(acc.clone_dirty())),
+           AccountEntry::Killed => Some(AccountEntry::Killed),
+           _ => None,
+       }
+   }
+
+   /// Clone account entry data that needs to be saved in the snapshot.
+   /// This includes basic account information and all locally cached storage keys.
+   fn clone_for_snapshot(&self) -> AccountEntry {
+       match *self {
+           AccountEntry::Cached(ref acc) => AccountEntry::Cached(acc.clone_all()),
+           AccountEntry::Killed => AccountEntry::Killed,
+           AccountEntry::Missing => AccountEntry::Missing,
+       }
+   }
+}
+/// Representation of the entire state of all accounts in the system.
+///
+/// `State` can work together with `StateDB` to share account cache.
+///
+/// Local cache contains changes made locally and changes accumulated
+/// locally from previous commits. Global cache reflects the database
+/// state and never contains any changes.
+///
+/// Account data can be in the following cache states:
+/// * In global but not local - something that was queried from the database,
+///   but never modified
+/// * In local but not global - something that was just added (e.g. new account)
+/// * In both with the same value - something that was changed to a new value,
+///   but changed back to a previous value in the same block (same State instance)
+/// * In both with different values - something that was overwritten with a
+///   new value.
+///
+/// All read-only state queries check local cache/modifications first,
+/// then global state cache. If data is not found in any of the caches
+/// it is loaded from the DB to the local cache.
+///
+/// Upon destruction all the local cache data is merged into the global cache.
+/// The merge might be rejected if the current state is non-canonical.
pub struct State {
-   db: Box<JournalDB>,
+   db: StateDB,
    root: H256,
-   cache: RefCell<HashMap<Address, Option<Account>>>,
-   snapshots: RefCell<Vec<HashMap<Address, Option<Option<Account>>>>>,
+   cache: RefCell<HashMap<Address, AccountEntry>>,
+   snapshots: RefCell<Vec<HashMap<Address, Option<AccountEntry>>>>,
    account_start_nonce: U256,
    trie_factory: TrieFactory,
}
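The doc comment above fixes the lookup discipline precisely; in code form, the documented read order looks roughly like this (plain maps standing in for the real caches and private methods):

    use std::collections::HashMap;

    // Local changes win, then the shared canonical cache, then the DB.
    fn lookup(local: &HashMap<u64, u64>, global: &HashMap<u64, u64>, db: &HashMap<u64, u64>, key: u64) -> Option<u64> {
        local.get(&key)
            .or_else(|| global.get(&key))
            .or_else(|| db.get(&key))
            .copied()
    }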
#[derive(Copy, Clone)]
|
||||
enum RequireCache {
|
||||
None,
|
||||
CodeSize,
|
||||
Code,
|
||||
}
|
||||
|
||||
const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \
|
||||
Therefore creating a SecTrieDB with this state's root will not fail.";
|
||||
|
||||
impl State {
|
||||
/// Creates new state with empty state root
|
||||
#[cfg(test)]
|
||||
pub fn new(mut db: Box<JournalDB>, account_start_nonce: U256, trie_factory: TrieFactory) -> State {
|
||||
pub fn new(mut db: StateDB, account_start_nonce: U256, trie_factory: TrieFactory) -> State {
|
||||
let mut root = H256::new();
|
||||
{
|
||||
// init trie and reset root too null
|
||||
@@ -71,7 +141,7 @@ impl State {
|
||||
}

    /// Creates new state with existing state root
    pub fn from_existing(db: Box<JournalDB>, root: H256, account_start_nonce: U256, trie_factory: TrieFactory) -> Result<State, TrieError> {
    pub fn from_existing(db: StateDB, root: H256, account_start_nonce: U256, trie_factory: TrieFactory) -> Result<State, TrieError> {
        if !db.as_hashdb().contains(&root) {
            return Err(TrieError::InvalidStateRoot(root));
        }
@@ -115,14 +185,21 @@ impl State {
                    self.cache.borrow_mut().insert(k, v);
                },
                None => {
                    self.cache.borrow_mut().remove(&k);
                    match self.cache.borrow_mut().entry(k) {
                        ::std::collections::hash_map::Entry::Occupied(e) => {
                            if e.get().is_dirty() {
                                e.remove();
                            }
                        },
                        _ => {}
                    }
                }
            }
        }
    }
}
}

    fn insert_cache(&self, address: &Address, account: Option<Account>) {
    fn insert_cache(&self, address: &Address, account: AccountEntry) {
        if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
            if !snapshot.contains_key(address) {
                snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account));
@@ -135,13 +212,14 @@ impl State {
    fn note_cache(&self, address: &Address) {
        if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() {
            if !snapshot.contains_key(address) {
                snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned());
                snapshot.insert(address.clone(), self.cache.borrow().get(address).map(AccountEntry::clone_for_snapshot));
            }
        }
    }

    /// Destroy the current object and return root and database.
    pub fn drop(self) -> (H256, Box<JournalDB>) {
    pub fn drop(mut self) -> (H256, StateDB) {
        self.commit_cache();
        (self.root, self.db)
    }

@@ -153,41 +231,95 @@ impl State {
    /// Create a new contract at address `contract`. If there is already an account at the address
    /// it will have its code reset, ready for `init_code()`.
    pub fn new_contract(&mut self, contract: &Address, balance: U256) {
        self.insert_cache(contract, Some(Account::new_contract(balance, self.account_start_nonce)));
        self.insert_cache(contract, AccountEntry::Cached(Account::new_contract(balance, self.account_start_nonce)));
    }

    /// Remove an existing account.
    pub fn kill_account(&mut self, account: &Address) {
        self.insert_cache(account, None);
        self.insert_cache(account, AccountEntry::Killed);
    }

    /// Determine whether an account exists.
    pub fn exists(&self, a: &Address) -> bool {
        self.ensure_cached(a, false, |a| a.is_some())
        self.ensure_cached(a, RequireCache::None, |a| a.is_some())
    }

    /// Get the balance of account `a`.
    pub fn balance(&self, a: &Address) -> U256 {
        self.ensure_cached(a, false,
        self.ensure_cached(a, RequireCache::None,
            |a| a.as_ref().map_or(U256::zero(), |account| *account.balance()))
    }

    /// Get the nonce of account `a`.
    pub fn nonce(&self, a: &Address) -> U256 {
        self.ensure_cached(a, false,
        self.ensure_cached(a, RequireCache::None,
            |a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce()))
    }

    /// Get the value of storage at `key` for account `address`.
    pub fn storage_at(&self, address: &Address, key: &H256) -> H256 {
        self.ensure_cached(address, false,
            |a| a.as_ref().map_or(H256::new(), |a| a.storage_at(&AccountDB::from_hash(self.db.as_hashdb(), a.address_hash(address)), key)))
        // Storage key search and update works like this:
        // 1. Check the bloom filter to see whether the account is known to be unused.
        // 2. If there's an entry for the account in the local cache, check for the key and return it if found.
        // 3. If there's an entry for the account in the global cache, check for the key or load it into that account.
        // 4. If the account is missing from the global cache, load it into the local cache and cache the key there.

        // check bloom

        // check local cache first without updating
        {
            let local_cache = self.cache.borrow_mut();
            let mut local_account = None;
            if let Some(maybe_acc) = local_cache.get(address) {
                match *maybe_acc {
                    AccountEntry::Cached(ref account) => {
                        if let Some(value) = account.cached_storage_at(key) {
                            return value;
                        } else {
                            local_account = Some(maybe_acc);
                        }
                    },
                    _ => return H256::new(),
                }
            }
            // check the global cache and cache the storage key there if found,
            // otherwise cache the account locally and cache the storage key there.
            if let Some(result) = self.db.get_cached(address, |acc| acc.map_or(H256::new(), |a| a.storage_at(&AccountDB::from_hash(self.db.as_hashdb(), a.address_hash(address)), key))) {
                return result;
            }
            if let Some(ref mut acc) = local_account {
                if let AccountEntry::Cached(ref account) = **acc {
                    return account.storage_at(&AccountDB::from_hash(self.db.as_hashdb(), account.address_hash(address)), key)
                } else {
                    return H256::new()
                }
            }
        }
        // account is not found in the global cache, get from the DB and insert into local
        if !self.db.check_account_bloom(address) { return H256::zero() }
        let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
        let maybe_acc = match db.get(address) {
            Ok(acc) => acc.map(Account::from_rlp),
            Err(e) => panic!("Potential DB corruption encountered: {}", e),
        };
        let r = maybe_acc.as_ref().map_or(H256::new(), |a| a.storage_at(&AccountDB::from_hash(self.db.as_hashdb(), a.address_hash(address)), key));
        match maybe_acc {
            Some(account) => self.insert_cache(address, AccountEntry::Cached(account)),
            None => self.insert_cache(address, AccountEntry::Missing),
        }
        r
    }

    /// Mutate storage of account `a` so that it is `value` for `key`.
    /// Get the account's code.
    pub fn code(&self, a: &Address) -> Option<Bytes> {
        self.ensure_cached(a, true,
            |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec())))
        self.ensure_cached(a, RequireCache::Code,
            |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec())))
    }

    /// Get the account's code size.
    pub fn code_size(&self, a: &Address) -> Option<u64> {
        self.ensure_cached(a, RequireCache::CodeSize,
            |a| a.as_ref().and_then(|a| a.code_size()))
    }

    /// Add `incr` to the balance of account `a`.
@@ -248,19 +380,19 @@ impl State {
    /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
    /// `accounts` is mutable because we may need to commit the code or storage and record that.
    #[cfg_attr(feature="dev", allow(match_ref_pats))]
    pub fn commit_into(
    fn commit_into(
        trie_factory: &TrieFactory,
        db: &mut HashDB,
        db: &mut StateDB,
        root: &mut H256,
        accounts: &mut HashMap<Address,
            Option<Account>>
        accounts: &mut HashMap<Address, AccountEntry>
    ) -> Result<(), Error> {
        // first, commit the sub trees.
        // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?
        for (address, ref mut a) in accounts.iter_mut() {
            match a {
                &mut&mut Some(ref mut account) if account.is_dirty() => {
                    let mut account_db = AccountDBMut::from_hash(db, account.address_hash(address));
                &mut&mut AccountEntry::Cached(ref mut account) if account.is_dirty() => {
                    db.note_account_bloom(&address);
                    let mut account_db = AccountDBMut::from_hash(db.as_hashdb_mut(), account.address_hash(address));
                    account.commit_storage(trie_factory, &mut account_db);
                    account.commit_code(&mut account_db);
                }
@@ -269,15 +401,18 @@ impl State {
        }

        {
            let mut trie = trie_factory.from_existing(db, root).unwrap();
            let mut trie = trie_factory.from_existing(db.as_hashdb_mut(), root).unwrap();
            for (address, ref mut a) in accounts.iter_mut() {
                match **a {
                    Some(ref mut account) if account.is_dirty() => {
                    AccountEntry::Cached(ref mut account) if account.is_dirty() => {
                        account.set_clean();
                        try!(trie.insert(address, &account.rlp()))
                        try!(trie.insert(address, &account.rlp()));
                    },
                    None => try!(trie.remove(address)),
                    _ => (),
                    AccountEntry::Killed => {
                        try!(trie.remove(address));
                        **a = AccountEntry::Missing;
                    },
                    _ => {},
                }
            }
        }
@@ -285,10 +420,27 @@ impl State {
        Ok(())
    }
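A toy model of the commit pass above, under the assumption that writing the account RLP can be reduced to a map insert: dirty cached entries are written and marked clean, and killed entries are removed and downgraded to `Missing`:

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Entry { Cached { dirty: bool }, Killed, Missing }

// Toy commit pass over the account cache, mirroring the match above.
fn commit(trie: &mut HashMap<&'static str, bool>, accounts: &mut HashMap<&'static str, Entry>) {
    for (addr, entry) in accounts.iter_mut() {
        match *entry {
            Entry::Cached { ref mut dirty } if *dirty => {
                trie.insert(*addr, true); // the account RLP would be inserted here
                *dirty = false;           // set_clean()
            }
            Entry::Killed => {
                trie.remove(*addr);
                *entry = Entry::Missing;  // a committed deletion is a known absence
            }
            _ => {}
        }
    }
}

fn main() {
    let mut trie = HashMap::new();
    trie.insert("b", true);
    let mut accounts = HashMap::new();
    accounts.insert("a", Entry::Cached { dirty: true });
    accounts.insert("b", Entry::Killed);
    commit(&mut trie, &mut accounts);
    assert!(trie.contains_key("a") && !trie.contains_key("b"));
    assert_eq!(accounts["b"], Entry::Missing);
}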

    fn commit_cache(&mut self) {
        let mut addresses = self.cache.borrow_mut();
        for (address, a) in addresses.drain() {
            match a {
                AccountEntry::Cached(account) => {
                    if !account.is_dirty() {
                        self.db.cache_account(address, Some(account));
                    }
                },
                AccountEntry::Missing => {
                    self.db.cache_account(address, None);
                },
                _ => {},
            }
        }
    }

    /// Commits our cached account changes into the trie.
    pub fn commit(&mut self) -> Result<(), Error> {
        assert!(self.snapshots.borrow().is_empty());
        Self::commit_into(&self.trie_factory, self.db.as_hashdb_mut(), &mut self.root, &mut *self.cache.borrow_mut())
        Self::commit_into(&self.trie_factory, &mut self.db, &mut self.root, &mut *self.cache.borrow_mut())
    }

    /// Clear state cache
@@ -302,7 +454,8 @@ impl State {
    pub fn populate_from(&mut self, accounts: PodState) {
        assert!(self.snapshots.borrow().is_empty());
        for (add, acc) in accounts.drain().into_iter() {
            self.cache.borrow_mut().insert(add, Some(Account::from_pod(acc)));
            self.db.note_account_bloom(&add);
            self.cache.borrow_mut().insert(add, AccountEntry::Cached(Account::from_pod(acc)));
        }
    }

@@ -312,7 +465,7 @@ impl State {
        // TODO: handle database rather than just the cache.
        // will need fat db.
        PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
            if let Some(ref acc) = *opt {
            if let AccountEntry::Cached(ref acc) = *opt {
                m.insert(add.clone(), PodAccount::from_account(acc));
            }
            m
@@ -321,7 +474,7 @@ impl State {

    fn query_pod(&mut self, query: &PodState) {
        for (ref address, ref pod_account) in query.get() {
            self.ensure_cached(address, true, |a| {
            self.ensure_cached(address, RequireCache::Code, |a| {
                if a.is_some() {
                    for key in pod_account.storage.keys() {
                        self.storage_at(address, key);
@@ -340,27 +493,61 @@ impl State {
        pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post)
    }

    /// Ensure account `a` is in our cache of the trie DB and return a handle for getting it.
    /// `require_code` requires that the code be cached, too.
    fn ensure_cached<'a, F, U>(&'a self, a: &'a Address, require_code: bool, f: F) -> U
        where F: FnOnce(&Option<Account>) -> U {
        let have_key = self.cache.borrow().contains_key(a);
        if !have_key {
            let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
            let maybe_acc = match db.get(a) {
                Ok(acc) => acc.map(Account::from_rlp),
                Err(e) => panic!("Potential DB corruption encountered: {}", e),
            };
            self.insert_cache(a, maybe_acc);
        }
        if require_code {
            if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() {
                let addr_hash = account.address_hash(a);
                account.cache_code(&AccountDB::from_hash(self.db.as_hashdb(), addr_hash));
    fn update_account_cache(require: RequireCache, account: &mut Account, address: &Address, db: &HashDB) {
        match require {
            RequireCache::None => {},
            RequireCache::Code => {
                let address_hash = account.address_hash(address);
                account.cache_code(&AccountDB::from_hash(db, address_hash));
            }
            RequireCache::CodeSize => {
                let address_hash = account.address_hash(address);
                account.cache_code_size(&AccountDB::from_hash(db, address_hash));
            }
        }
    }

        f(self.cache.borrow().get(a).unwrap())
    /// Check caches for required data.
    /// First searches for the account in the local cache, then in the shared cache.
    /// Populates the local cache if nothing is found.
    fn ensure_cached<F, U>(&self, a: &Address, require: RequireCache, f: F) -> U
        where F: Fn(Option<&Account>) -> U {
        // check local cache first
        if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) {
            if let AccountEntry::Cached(ref mut account) = **maybe_acc {
                Self::update_account_cache(require, account, a, self.db.as_hashdb());
                return f(Some(account));
            }
            return f(None);
        }
        // check global cache
        let result = self.db.get_cached(a, |mut acc| {
            if let Some(ref mut account) = acc {
                Self::update_account_cache(require, account, a, self.db.as_hashdb());
            }
            f(acc.map(|a| &*a))
        });
        match result {
            Some(r) => r,
            None => {
                // not found in the global cache, get from the DB and insert into local
                if !self.db.check_account_bloom(a) { return f(None); }
                let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
                let mut maybe_acc = match db.get(a) {
                    Ok(acc) => acc.map(Account::from_rlp),
                    Err(e) => panic!("Potential DB corruption encountered: {}", e),
                };
                if let Some(ref mut account) = maybe_acc.as_mut() {
                    Self::update_account_cache(require, account, a, self.db.as_hashdb());
                }
                let r = f(maybe_acc.as_ref());
                match maybe_acc {
                    Some(account) => self.insert_cache(a, AccountEntry::Cached(account)),
                    None => self.insert_cache(a, AccountEntry::Missing),
                }
                r
            }
        }
    }

    /// Pull account `a` into our cache from the trie DB. `require_code` requires that the code be cached, too.
@@ -375,29 +562,45 @@ impl State {
        {
            let contains_key = self.cache.borrow().contains_key(a);
            if !contains_key {
                let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
                let maybe_acc = match db.get(a) {
                    Ok(acc) => acc.map(Account::from_rlp),
                    Err(e) => panic!("Potential DB corruption encountered: {}", e),
                };

                self.insert_cache(a, maybe_acc);
                match self.db.get_cached_account(a) {
                    Some(Some(acc)) => self.insert_cache(a, AccountEntry::Cached(acc)),
                    Some(None) => self.insert_cache(a, AccountEntry::Missing),
                    None => {
                        let maybe_acc = if self.db.check_account_bloom(a) {
                            let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR);
                            let maybe_acc = match db.get(a) {
                                Ok(Some(acc)) => AccountEntry::Cached(Account::from_rlp(acc)),
                                Ok(None) => AccountEntry::Missing,
                                Err(e) => panic!("Potential DB corruption encountered: {}", e),
                            };
                            maybe_acc
                        }
                        else {
                            AccountEntry::Missing
                        };
                        self.insert_cache(a, maybe_acc);
                    }
                }
            } else {
                self.note_cache(a);
            }

            match self.cache.borrow_mut().get_mut(a).unwrap() {
                &mut Some(ref mut acc) => not_default(acc),
                slot @ &mut None => *slot = Some(default()),
                &mut AccountEntry::Cached(ref mut acc) => not_default(acc),
                slot => *slot = AccountEntry::Cached(default()),
            }

            RefMut::map(self.cache.borrow_mut(), |c| {
                let account = c.get_mut(a).unwrap().as_mut().unwrap();
                if require_code {
                    let addr_hash = account.address_hash(a);
                    account.cache_code(&AccountDB::from_hash(self.db.as_hashdb(), addr_hash));
                match c.get_mut(a).unwrap() {
                    &mut AccountEntry::Cached(ref mut account) => {
                        if require_code {
                            let addr_hash = account.address_hash(a);
                            account.cache_code(&AccountDB::from_hash(self.db.as_hashdb(), addr_hash));
                        }
                        account
                    },
                    _ => panic!("Required account must always exist; qed"),
                }
                account
            })
        }
    }
@@ -410,11 +613,21 @@ impl fmt::Debug for State {

impl Clone for State {
    fn clone(&self) -> State {
        let cache = {
            let mut cache: HashMap<Address, AccountEntry> = HashMap::new();
            for (key, val) in self.cache.borrow().iter() {
                if let Some(entry) = val.clone_dirty() {
                    cache.insert(key.clone(), entry);
                }
            }
            cache
        };

        State {
            db: self.db.boxed_clone(),
            root: self.root.clone(),
            cache: RefCell::new(self.cache.borrow().clone()),
            snapshots: RefCell::new(self.snapshots.borrow().clone()),
            cache: RefCell::new(cache),
            snapshots: RefCell::new(Vec::new()),
            account_start_nonce: self.account_start_nonce.clone(),
            trie_factory: self.trie_factory.clone(),
        }
ethcore/src/state_db.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use lru_cache::LruCache;
use util::journaldb::JournalDB;
use util::hash::{H256};
use util::hashdb::HashDB;
use util::{Arc, Address, DBTransaction, UtilError, Mutex, Hashable, BytesConvertable};
use account::Account;
use bloomfilter::{Bloom, BloomJournal};
use util::Database;
use client::DB_COL_ACCOUNT_BLOOM;
use byteorder::{LittleEndian, ByteOrder};

const STATE_CACHE_ITEMS: usize = 65536;

struct AccountCache {
    /// DB account cache. `None` indicates that the account is known to be missing.
    accounts: LruCache<Address, Option<Account>>,
}
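The shared cache is a bounded LRU keyed by address; a tiny sketch of its eviction behaviour, assuming the lru-cache crate referenced by the `use lru_cache::LruCache;` import above:

use lru_cache::LruCache;

fn main() {
    // Capacity-2 cache: inserting a third entry evicts the least recently used.
    let mut cache: LruCache<&str, Option<u64>> = LruCache::new(2);
    cache.insert("a", Some(1));
    cache.insert("b", None); // None records an account known to be missing
    cache.insert("c", Some(3)); // evicts "a"
    assert!(cache.get_mut(&"a").is_none());
    assert_eq!(cache.get_mut(&"b"), Some(&mut None));
}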

/// State database abstraction.
/// Manages shared global state cache.
/// A clone of `StateDB` may be created as canonical or not.
/// For canonical clones, cache changes are accumulated and applied
/// on commit.
/// For non-canonical clones, the cache is cleared on commit.
pub struct StateDB {
    db: Box<JournalDB>,
    account_cache: Arc<Mutex<AccountCache>>,
    cache_overlay: Vec<(Address, Option<Account>)>,
    is_canon: bool,
    account_bloom: Arc<Mutex<Bloom>>,
}

pub const ACCOUNT_BLOOM_SPACE: usize = 1048576;
pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000;

pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count";

impl StateDB {
    pub fn load_bloom(db: &Database) -> Bloom {
        let hash_count_entry = db.get(DB_COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY)
            .expect("Low-level database error");

        if hash_count_entry.is_none() {
            return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET);
        }
        let hash_count_bytes = hash_count_entry.unwrap();
        assert_eq!(hash_count_bytes.len(), 1);
        let hash_count = hash_count_bytes[0];

        let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8];
        let mut key = [0u8; 8];
        for i in 0..ACCOUNT_BLOOM_SPACE / 8 {
            LittleEndian::write_u64(&mut key, i as u64);
            bloom_parts[i] = db.get(DB_COL_ACCOUNT_BLOOM, &key).expect("low-level database error")
                .and_then(|val| Some(LittleEndian::read_u64(&val[..])))
                .unwrap_or(0u64);
        }

        let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32);
        trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.how_full(), hash_count);
        bloom
    }
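The on-disk layout used by `load_bloom` and `commit_bloom`: each 64-bit bloom part is stored under its little-endian-encoded index. A small sketch of that encoding, using the same byteorder calls as the code above:

use byteorder::{ByteOrder, LittleEndian};

fn main() {
    // Each u64 part of the bloom filter is stored under an 8-byte key:
    // the part's index, encoded little-endian, in DB_COL_ACCOUNT_BLOOM.
    let part_index: u64 = 3;
    let part_value: u64 = 0xDEAD_BEEF;

    let mut key = [0u8; 8];
    let mut val = [0u8; 8];
    LittleEndian::write_u64(&mut key, part_index);
    LittleEndian::write_u64(&mut val, part_value);

    // Reading the value back recovers the bloom part exactly.
    assert_eq!(LittleEndian::read_u64(&val), part_value);
    assert_eq!(key, [3, 0, 0, 0, 0, 0, 0, 0]);
}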

    /// Create a new instance wrapping `JournalDB`
    pub fn new(db: Box<JournalDB>) -> StateDB {
        let bloom = Self::load_bloom(db.backing());
        StateDB {
            db: db,
            account_cache: Arc::new(Mutex::new(AccountCache { accounts: LruCache::new(STATE_CACHE_ITEMS) })),
            cache_overlay: Vec::new(),
            is_canon: false,
            account_bloom: Arc::new(Mutex::new(bloom)),
        }
    }

    pub fn check_account_bloom(&self, address: &Address) -> bool {
        trace!(target: "account_bloom", "Check account bloom: {:?}", address);
        let bloom = self.account_bloom.lock();
        bloom.check(address.sha3().as_slice())
    }

    pub fn note_account_bloom(&self, address: &Address) {
        trace!(target: "account_bloom", "Note account bloom: {:?}", address);
        let mut bloom = self.account_bloom.lock();
        bloom.set(address.sha3().as_slice());
    }

    pub fn commit_bloom(batch: &DBTransaction, journal: BloomJournal) -> Result<(), UtilError> {
        assert!(journal.hash_functions <= 255);
        try!(batch.put(DB_COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]));
        let mut key = [0u8; 8];
        let mut val = [0u8; 8];

        for (bloom_part_index, bloom_part_value) in journal.entries {
            LittleEndian::write_u64(&mut key, bloom_part_index as u64);
            LittleEndian::write_u64(&mut val, bloom_part_value);
            try!(batch.put(DB_COL_ACCOUNT_BLOOM, &key, &val));
        }
        Ok(())
    }

    /// Commit all recent insert operations and canonical historical commits' removals from the
    /// old era to the backing database, reverting any non-canonical historical commit's inserts.
    pub fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
        {
            let mut bloom_lock = self.account_bloom.lock();
            try!(Self::commit_bloom(batch, bloom_lock.drain_journal()));
        }

        let records = try!(self.db.commit(batch, now, id, end));
        if self.is_canon {
            self.commit_cache();
        } else {
            self.clear_cache();
        }
        Ok(records)
    }

    /// Returns an interface to HashDB.
    pub fn as_hashdb(&self) -> &HashDB {
        self.db.as_hashdb()
    }

    /// Returns an interface to mutable HashDB.
    pub fn as_hashdb_mut(&mut self) -> &mut HashDB {
        self.db.as_hashdb_mut()
    }

    /// Clone the database.
    pub fn boxed_clone(&self) -> StateDB {
        StateDB {
            db: self.db.boxed_clone(),
            account_cache: self.account_cache.clone(),
            cache_overlay: Vec::new(),
            is_canon: false,
            account_bloom: self.account_bloom.clone(),
        }
    }

    /// Clone the database for a canonical state.
    pub fn boxed_clone_canon(&self) -> StateDB {
        StateDB {
            db: self.db.boxed_clone(),
            account_cache: self.account_cache.clone(),
            cache_overlay: Vec::new(),
            is_canon: true,
            account_bloom: self.account_bloom.clone(),
        }
    }
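A self-contained sketch of the canonical/non-canonical commit rule from the doc comment; the toy type below keeps an owned map where the real `StateDB` shares its cache through `Arc<Mutex<...>>`:

use std::collections::HashMap;

// Toy StateDB: a shared cache plus a per-clone overlay of pending changes.
struct ToyStateDb {
    shared: HashMap<String, u64>,
    overlay: Vec<(String, u64)>,
    is_canon: bool,
}

impl ToyStateDb {
    fn commit(&mut self) {
        if self.is_canon {
            // Canonical clone: pending changes are applied to the shared cache.
            for (k, v) in self.overlay.drain(..) {
                self.shared.insert(k, v);
            }
        } else {
            // Non-canonical clone: its changes may not match the canonical
            // chain, so the shared cache is invalidated instead.
            self.overlay.clear();
            self.shared.clear();
        }
    }
}

fn main() {
    let mut canon = ToyStateDb { shared: HashMap::new(), overlay: vec![("a".into(), 1)], is_canon: true };
    canon.commit();
    assert_eq!(canon.shared.get("a"), Some(&1));

    let mut side = ToyStateDb { shared: canon.shared, overlay: vec![("b".into(), 2)], is_canon: false };
    side.commit();
    assert!(side.shared.is_empty()); // cache dropped, not polluted
}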

    /// Check if pruning is enabled on the database.
    pub fn is_pruned(&self) -> bool {
        self.db.is_pruned()
    }

    /// Heap size used.
    pub fn mem_used(&self) -> usize {
        self.db.mem_used() //TODO: + self.account_cache.lock().heap_size_of_children()
    }

    /// Returns underlying `JournalDB`.
    pub fn journal_db(&self) -> &JournalDB {
        &*self.db
    }

    /// Enqueue cache change.
    pub fn cache_account(&mut self, addr: Address, data: Option<Account>) {
        self.cache_overlay.push((addr, data));
    }

    /// Apply pending cache changes.
    fn commit_cache(&mut self) {
        let mut cache = self.account_cache.lock();
        for (address, account) in self.cache_overlay.drain(..) {
            if let Some(&mut Some(ref mut existing)) = cache.accounts.get_mut(&address) {
                if let Some(new) = account {
                    existing.merge_with(new);
                    continue;
                }
            }
            cache.accounts.insert(address, account);
        }
    }

    /// Clear the cache.
    pub fn clear_cache(&mut self) {
        self.cache_overlay.clear();
        let mut cache = self.account_cache.lock();
        cache.accounts.clear();
    }

    /// Get basic copy of the cached account. Does not include storage.
    /// Returns `None` if the state is non-canonical and the cache is disabled,
    /// or if the account is not cached.
    pub fn get_cached_account(&self, addr: &Address) -> Option<Option<Account>> {
        if !self.is_canon {
            return None;
        }
        let mut cache = self.account_cache.lock();
        cache.accounts.get_mut(&addr).map(|a| a.as_ref().map(|a| a.clone_basic()))
    }

    /// Get value from a cached account.
    /// Returns `None` if the state is non-canonical and the cache is disabled,
    /// or if the account is not cached.
    pub fn get_cached<F, U>(&self, a: &Address, f: F) -> Option<U>
        where F: FnOnce(Option<&mut Account>) -> U {
        if !self.is_canon {
            return None;
        }
        let mut cache = self.account_cache.lock();
        cache.accounts.get_mut(a).map(|c| f(c.as_mut()))
    }
}
@@ -18,6 +18,7 @@ use io::*;
use client::{self, BlockChainClient, Client, ClientConfig};
use common::*;
use spec::*;
use state_db::StateDB;
use block::{OpenBlock, Drain};
use blockchain::{BlockChain, Config as BlockChainConfig};
use state::*;
@@ -136,9 +137,9 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
    let client = Client::new(ClientConfig::default(), &test_spec, dir.as_path(), Arc::new(Miner::with_spec(&test_spec)), IoChannel::disconnected()).unwrap();
    let test_engine = &*test_spec.engine;

    let mut db_result = get_temp_journal_db();
    let mut db_result = get_temp_state_db();
    let mut db = db_result.take();
    test_spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
    test_spec.ensure_db_good(&mut db).unwrap();
    let vm_factory = Default::default();
    let genesis_header = test_spec.genesis_header();

@@ -303,9 +304,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
    }
}

pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
pub fn get_temp_state_db() -> GuardedTempResult<StateDB> {
    let temp = RandomTempPath::new();
    let journal_db = get_temp_journal_db_in(temp.as_path());
    let journal_db = get_temp_state_db_in(temp.as_path());

    GuardedTempResult {
        _temp: temp,
@@ -315,7 +316,7 @@ pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {

pub fn get_temp_state() -> GuardedTempResult<State> {
    let temp = RandomTempPath::new();
    let journal_db = get_temp_journal_db_in(temp.as_path());
    let journal_db = get_temp_state_db_in(temp.as_path());

    GuardedTempResult {
        _temp: temp,
@@ -323,13 +324,14 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
    }
}

pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
pub fn get_temp_state_db_in(path: &Path) -> StateDB {
    let db = new_db(path.to_str().expect("Only valid utf8 paths for tests."));
    journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None)
    let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, client::DB_COL_STATE);
    StateDB::new(journal_db)
}

pub fn get_temp_state_in(path: &Path) -> State {
    let journal_db = get_temp_journal_db_in(path);
    let journal_db = get_temp_state_db_in(path);
    State::new(journal_db, U256::from(0), Default::default())
}
@@ -89,10 +89,10 @@ pub fn setup_log(config: &Config) -> Result<Arc<RotatingLogger>, String> {
    let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap();

    let with_color = if max_log_level() <= LogLevelFilter::Info {
        format!("{}{}", Colour::Black.bold().paint(timestamp), record.args())
        format!("{} {}", Colour::Black.bold().paint(timestamp), record.args())
    } else {
        let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x)));
        format!("{}{} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args())
        format!("{} {} {} {} {}", Colour::Black.bold().paint(timestamp), name, record.level(), record.target(), record.args())
    };

    let removed_color = kill_color(with_color.as_ref());

@@ -4,7 +4,7 @@
!define DESCRIPTION "Fast, light, robust Ethereum implementation"
!define VERSIONMAJOR 1
!define VERSIONMINOR 3
!define VERSIONBUILD 1
!define VERSIONBUILD 2

!addplugindir .\
@@ -19,11 +19,6 @@ use io::PanicHandler;
use rpc_apis;
use helpers::replace_home;

#[cfg(feature = "dapps")]
pub use ethcore_dapps::Server as WebappServer;
#[cfg(not(feature = "dapps"))]
pub struct WebappServer;

#[derive(Debug, PartialEq, Clone)]
pub struct Configuration {
    pub enabled: bool,
@@ -77,6 +72,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<We
}

pub use self::server::setup_dapps_server;
pub use self::server::WebappServer;

#[cfg(not(feature = "dapps"))]
mod server {
@@ -29,7 +29,7 @@ use ethcore::migrations::Extract;
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 9;
const CURRENT_VERSION: u32 = 10;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once.
@@ -140,7 +140,12 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> Mig

/// Migrations on the consolidated database.
fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
    let manager = MigrationManager::new(default_migration_settings(compaction_profile));
    let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
    // this migration won't ever fire, because the version will already be 9;
    // it is added so that the migration API knows to open the database with 5 columns
    try!(manager.add_migration(migrations::ToV9::new(Some(5), migrations::Extract::All)).map_err(|_| Error::MigrationImpossible));
    // this one will fire
    try!(manager.add_migration(migrations::ToV10).map_err(|_| Error::MigrationImpossible));
    Ok(manager)
}

@@ -180,7 +185,6 @@ fn consolidate_database(
    Ok(())
}

/// Migrates database at given position with given migration rules.
fn migrate_database(version: u32, db_path: PathBuf, mut migrations: MigrationManager) -> Result<(), Error> {
    // check if migration is needed
@@ -215,6 +219,12 @@ fn exists(path: &Path) -> bool {
    fs::metadata(path).is_ok()
}

// in-place upgrades that do nothing when called repeatedly
fn run_inplace_upgrades(path: &Path) -> Result<(), Error> {
    try!(migrations::upgrade_account_bloom(path));
    Ok(())
}

/// Migrates the database.
pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> {
    // read version file.
@@ -228,6 +238,7 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr

    // We are in the latest version, yay!
    if version == CURRENT_VERSION {
        try!(run_inplace_upgrades(consolidated_database_path(path).as_path()));
        return Ok(())
    }

@@ -259,6 +270,8 @@ pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionPr
        println!("Migration finished");
    }

    try!(run_inplace_upgrades(consolidated_database_path(path).as_path()));

    // update version file.
    update_version(path)
}
@@ -68,7 +68,7 @@ fn codes_path(path: String) -> PathBuf {

pub fn new_token(path: String) -> Result<String, String> {
    generate_new_token(path)
        .map(|code| format!("This key code will authorise your System Signer UI: {}", Colour::White.bold().paint(code)))
        .map(|code| format!("This key code will authorise your System Signer UI: {}", code))
        .map_err(|err| format!("Error generating token: {:?}", err))
}
@@ -433,11 +433,14 @@ impl<C, S: ?Sized, M, EM> Eth for EthClient<C, S, M, EM> where
    try!(self.active());
    from_params::<(RpcH256,)>(params)
        .and_then(|(hash,)| {
            let miner = take_weak!(self.miner);
            let hash: H256 = hash.into();
            match miner.transaction(&hash) {
                Some(pending_tx) => to_value(&Transaction::from(pending_tx)),
                None => self.transaction(TransactionID::Hash(hash))
            let miner = take_weak!(self.miner);
            let client = take_weak!(self.client);
            let maybe_tx = client.transaction(TransactionID::Hash(hash)).map(Transaction::from)
                .or_else(|| miner.transaction(&hash).map(Transaction::from));
            match maybe_tx {
                Some(t) => to_value(&t),
                None => Ok(Value::Null),
            }
        })
}
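The rewrite above makes mined transactions take precedence over pending ones; the fallback is plain `Option::or_else`, as in this standalone sketch (toy string data, not the RPC types):

// Toy lookup: prefer the canonical-chain record, fall back to the pending pool.
fn find(chain: Option<&str>, pending: Option<&str>) -> Option<String> {
    chain.map(String::from).or_else(|| pending.map(String::from))
}

fn main() {
    // A transaction that is both mined and still in the pool resolves
    // to the mined copy, which carries its block information.
    assert_eq!(find(Some("mined"), Some("pending")), Some("mined".into()));
    assert_eq!(find(None, Some("pending")), Some("pending".into()));
    assert_eq!(find(None, None), None);
}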

@@ -26,7 +26,7 @@ use ethcore::client::{MiningBlockChainClient};

use jsonrpc_core::*;
use v1::traits::Ethcore;
use v1::types::{Bytes, U256, Peers, H160};
use v1::types::{Bytes, U256, Peers, H160, Transaction};
use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, NetworkSettings};
use v1::helpers::params::expect_no_params;

@@ -200,4 +200,11 @@ impl<C, M, S: ?Sized> Ethcore for EthcoreClient<C, M, S> where M: MinerService +
            Some(ref queue) => to_value(&queue.len()),
        }
    }

    fn pending_transactions(&self, params: Params) -> Result<Value, Error> {
        try!(self.active());
        try!(expect_no_params(params));

        to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::<Vec<Transaction>>())
    }
}

@@ -286,3 +286,18 @@ fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() {

    assert_eq!(io.handle_request(request), Some(response.to_owned()));
}

#[test]
fn rpc_ethcore_pending_transactions() {
    let miner = miner_service();
    let client = client_service();
    let sync = sync_provider();
    let net = network_service();
    let io = IoHandler::new();
    io.add_delegate(ethcore_client(&client, &miner, &sync, &net).to_delegate());

    let request = r#"{"jsonrpc": "2.0", "method": "ethcore_pendingTransactions", "params":[], "id": 1}"#;
    let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#;

    assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
@@ -70,6 +70,9 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
    /// Returns the value of the registrar for this network.
    fn registry_address(&self, _: Params) -> Result<Value, Error>;

    /// Returns all transactions in the transaction queue.
    fn pending_transactions(&self, _: Params) -> Result<Value, Error>;

    /// Should be used to convert object to io delegate.
    fn to_delegate(self) -> IoDelegate<Self> {
        let mut delegate = IoDelegate::new(Arc::new(self));
@@ -90,6 +93,7 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
        delegate.add_method("ethcore_gasPriceStatistics", Ethcore::gas_price_statistics);
        delegate.add_method("ethcore_unsignedTransactionsCount", Ethcore::unsigned_transactions_count);
        delegate.add_method("ethcore_registryAddress", Ethcore::registry_address);
        delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions);
        delegate
    }
}
@@ -70,10 +70,12 @@ impl Visitor for BytesVisitor {
    type Value = Bytes;

    fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
        if value.len() >= 2 && &value[0..2] == "0x" {
            Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![])))
        if value.is_empty() {
            Ok(Bytes::new(Vec::new()))
        } else if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 {
            Ok(Bytes::new(try!(FromHex::from_hex(&value[2..]).map_err(|_| Error::custom("invalid hex")))))
        } else {
            Err(Error::custom("invalid hex"))
            Err(Error::custom("invalid format"))
        }
    }

@@ -95,5 +97,32 @@ mod tests {
        let serialized = serde_json::to_string(&bytes).unwrap();
        assert_eq!(serialized, r#""0x0123456789abcdef""#);
    }

    #[test]
    fn test_bytes_deserialize() {
        // TODO [ToDr] Uncomment when Mist starts sending correct data
        // let bytes1: Result<Bytes, serde_json::Error> = serde_json::from_str(r#""""#);
        let bytes2: Result<Bytes, serde_json::Error> = serde_json::from_str(r#""0x123""#);
        let bytes3: Result<Bytes, serde_json::Error> = serde_json::from_str(r#""0xgg""#);

        let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap();
        let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap();
        let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap();
        let bytes7: Bytes = serde_json::from_str(r#""0x0123456789abcdef""#).unwrap();

        // assert!(bytes1.is_err());
        assert!(bytes2.is_err());
        assert!(bytes3.is_err());
        assert_eq!(bytes4, Bytes(vec![]));
        assert_eq!(bytes5, Bytes(vec![0x12]));
        assert_eq!(bytes6, Bytes(vec![0x1, 0x23]));
        assert_eq!(bytes7, Bytes(vec![0x1, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]));
    }

    #[test]
    fn test_bytes_lenient_against_the_spec_deserialize_for_empty_string_for_geth_compatibility() {
        let deserialized: Bytes = serde_json::from_str(r#""""#).unwrap();
        assert_eq!(deserialized, Bytes(Vec::new()));
    }
}
@@ -15,9 +15,10 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use std::collections::HashMap;
use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId,
    NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError};
use util::{U256, H256, Secret, Populatable};
use util::{U256, H256, Secret, Populatable, Bytes};
use io::{TimerToken};
use ethcore::client::{BlockChainClient, ChainNotify};
use ethcore::header::BlockNumber;
@@ -78,7 +79,11 @@ impl EthSync {
    let service = try!(NetworkService::new(try!(network_config.into_basic())));
    let sync = Arc::new(EthSync{
        network: service,
        handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain }),
        handler: Arc::new(SyncProtocolHandler {
            sync: RwLock::new(chain_sync),
            chain: chain,
            overlay: RwLock::new(HashMap::new()),
        }),
    });

    Ok(sync)
@@ -99,6 +104,8 @@ struct SyncProtocolHandler {
    chain: Arc<BlockChainClient>,
    /// Sync strategy
    sync: RwLock<ChainSync>,
    /// Chain overlay used to cache data such as the fork block.
    overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}

impl NetworkProtocolHandler for SyncProtocolHandler {
@@ -107,20 +114,20 @@ impl NetworkProtocolHandler for SyncProtocolHandler {
    }

    fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
        ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain), *peer, packet_id, data);
        ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &self.overlay), *peer, packet_id, data);
    }

    fn connected(&self, io: &NetworkContext, peer: &PeerId) {
        self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain), *peer);
        self.sync.write().on_peer_connected(&mut NetSyncIo::new(io, &*self.chain, &self.overlay), *peer);
    }

    fn disconnected(&self, io: &NetworkContext, peer: &PeerId) {
        self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain), *peer);
        self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &self.overlay), *peer);
    }

    fn timeout(&self, io: &NetworkContext, _timer: TimerToken) {
        self.sync.write().maintain_peers(&mut NetSyncIo::new(io, &*self.chain));
        self.sync.write().maintain_sync(&mut NetSyncIo::new(io, &*self.chain));
        self.sync.write().maintain_peers(&mut NetSyncIo::new(io, &*self.chain, &self.overlay));
        self.sync.write().maintain_sync(&mut NetSyncIo::new(io, &*self.chain, &self.overlay));
    }
}

@@ -134,7 +141,7 @@ impl ChainNotify for EthSync {
    _duration: u64)
{
    self.network.with_context(ETH_PROTOCOL, |context| {
        let mut sync_io = NetSyncIo::new(context, &*self.handler.chain);
        let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &self.handler.overlay);
        self.handler.sync.write().chain_new_blocks(
            &mut sync_io,
            &imported,
@@ -203,7 +210,7 @@ impl ManageNetwork for EthSync {

    fn stop_network(&self) {
        self.network.with_context(ETH_PROTOCOL, |context| {
            let mut sync_io = NetSyncIo::new(context, &*self.handler.chain);
            let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &self.handler.overlay);
            self.handler.sync.write().abort(&mut sync_io);
        });
        self.stop();
@@ -18,7 +18,7 @@ use util::*;
use network::NetworkError;
use ethcore::header::{Header as BlockHeader};

known_heap_size!(0, HeaderId, SyncBlock);
known_heap_size!(0, HeaderId);

/// Block data with optional body.
struct SyncBlock {
@@ -26,6 +26,12 @@ struct SyncBlock {
    body: Option<Bytes>,
}

impl HeapSizeOf for SyncBlock {
    fn heap_size_of_children(&self) -> usize {
        self.header.heap_size_of_children() + self.body.heap_size_of_children()
    }
}

/// Used to identify a header by its transactions and uncles hashes
#[derive(Eq, PartialEq, Hash)]
struct HeaderId {
@@ -218,10 +224,14 @@ impl BlockCollection {
    self.blocks.contains_key(hash)
}

/// Return heap size.
/// Return used heap size.
pub fn heap_size(&self) -> usize {
    //TODO: other collections
    self.blocks.heap_size_of_children()
    self.heads.heap_size_of_children()
        + self.blocks.heap_size_of_children()
        + self.parents.heap_size_of_children()
        + self.header_ids.heap_size_of_children()
        + self.downloading_headers.heap_size_of_children()
        + self.downloading_bodies.heap_size_of_children()
}

/// Check if given block hash is marked as being downloaded.
@@ -449,7 +449,9 @@ impl ChainSync {
    let confirmed = match self.peers.get_mut(&peer_id) {
        Some(ref mut peer) if peer.asking == PeerAsking::ForkHeader => {
            let item_count = r.item_count();
            if item_count == 0 || (item_count == 1 && try!(r.at(0)).as_raw().sha3() == self.fork_block.unwrap().1) {
            let (fork_number, fork_hash) = self.fork_block.expect("ForkHeader request is sent only if fork block is Some; qed").clone();
            let header = try!(r.at(0)).as_raw();
            if item_count == 0 || (item_count == 1 && header.sha3() == fork_hash) {
                peer.asking = PeerAsking::Nothing;
                if item_count == 0 {
                    trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id);
@@ -457,6 +459,9 @@ impl ChainSync {
                } else {
                    trace!(target: "sync", "{}: Confirmed peer", peer_id);
                    peer.confirmation = ForkConfirmation::Confirmed;
                    if !io.chain_overlay().read().contains_key(&fork_number) {
                        io.chain_overlay().write().insert(fork_number, header.to_vec());
                    }
                }
                true
            } else {
@@ -1135,8 +1140,13 @@ impl ChainSync {
    let mut count = 0;
    let mut data = Bytes::new();
    let inc = (skip + 1) as BlockNumber;
    let overlay = io.chain_overlay().read();
    while number <= last && count < max_count {
        if let Some(mut hdr) = io.chain().block_header(BlockID::Number(number)) {
        if let Some(hdr) = overlay.get(&number) {
            trace!(target: "sync", "{}: Returning cached fork header", peer_id);
            data.extend(hdr);
            count += 1;
        } else if let Some(mut hdr) = io.chain().block_header(BlockID::Number(number)) {
            data.append(&mut hdr);
            count += 1;
        }
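The overlay introduced here caches the verified fork header so later header requests can be served without a chain lookup; a self-contained sketch of that flow, using std's RwLock in place of util's and an arbitrary example block number:

use std::collections::HashMap;
use std::sync::RwLock;

fn main() {
    // Toy chain overlay: block number -> raw header bytes.
    let overlay: RwLock<HashMap<u64, Vec<u8>>> = RwLock::new(HashMap::new());

    let fork_number = 1_920_000u64; // example number, not tied to any chain
    let fork_header = vec![0xf9, 0x02, 0x1a]; // stand-in for RLP header bytes

    // Cache the verified fork header once, when the peer is confirmed.
    if !overlay.read().unwrap().contains_key(&fork_number) {
        overlay.write().unwrap().insert(fork_number, fork_header.clone());
    }

    // Later requests for that number hit the overlay, not the chain.
    let served = overlay.read().unwrap().get(&fork_number).cloned();
    assert_eq!(served, Some(fork_header));
}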

@@ -1336,8 +1346,10 @@ impl ChainSync {
    let mut rlp_stream = RlpStream::new_list(blocks.len());
    for block_hash in blocks {
        let mut hash_rlp = RlpStream::new_list(2);
        let number = HeaderView::new(&chain.block_header(BlockID::Hash(block_hash.clone()))
            .expect("chain.tree_route and chain.find_uncles only return hashes of blocks that are in the blockchain. qed.")).number();
        let number = match chain.block_header(BlockID::Hash(block_hash.clone())) {
            Some(header) => HeaderView::new(&header).number(),
            _ => return None,
        };
        hash_rlp.append(&block_hash);
        hash_rlp.append(&number);
        rlp_stream.append_raw(hash_rlp.as_raw(), 1);

@@ -14,8 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::HashMap;
use ethcore::header::BlockNumber;
use network::{NetworkContext, PeerId, PacketId, NetworkError};
use ethcore::client::BlockChainClient;
use util::{RwLock, Bytes};

/// IO interface for the syncing handler.
/// Provides peer connection management and an interface to the blockchain client.
@@ -41,20 +44,24 @@ pub trait SyncIo {
    }
    /// Check if the session is expired
    fn is_expired(&self) -> bool;
    /// Return the sync overlay
    fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>>;
}

/// Wraps `NetworkContext` and the blockchain client
pub struct NetSyncIo<'s, 'h> where 'h: 's {
    network: &'s NetworkContext<'h>,
    chain: &'s BlockChainClient
    chain: &'s BlockChainClient,
    chain_overlay: &'s RwLock<HashMap<BlockNumber, Bytes>>,
}

impl<'s, 'h> NetSyncIo<'s, 'h> {
    /// Creates a new instance from the `NetworkContext` and the blockchain client reference.
    pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> {
    pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient, chain_overlay: &'s RwLock<HashMap<BlockNumber, Bytes>>) -> NetSyncIo<'s, 'h> {
        NetSyncIo {
            network: network,
            chain: chain,
            chain_overlay: chain_overlay,
        }
    }
}
@@ -80,6 +87,10 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> {
        self.chain
    }

    fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
        self.chain_overlay
    }

    fn peer_info(&self, peer_id: PeerId) -> String {
        self.network.peer_info(peer_id)
    }
@@ -26,6 +26,7 @@ pub struct TestIo<'p> {
    pub chain: &'p mut TestBlockChainClient,
    pub queue: &'p mut VecDeque<TestPacket>,
    pub sender: Option<PeerId>,
    overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}

impl<'p> TestIo<'p> {
@@ -33,7 +34,8 @@ impl<'p> TestIo<'p> {
        TestIo {
            chain: chain,
            queue: queue,
            sender: sender
            sender: sender,
            overlay: RwLock::new(HashMap::new()),
        }
    }
}
@@ -70,6 +72,10 @@ impl<'p> SyncIo for TestIo<'p> {
    fn chain(&self) -> &BlockChainClient {
        self.chain
    }

    fn chain_overlay(&self) -> &RwLock<HashMap<BlockNumber, Bytes>> {
        &self.overlay
    }
}

pub struct TestPacket {
@@ -3,7 +3,7 @@ description = "Ethcore utility library"
homepage = "http://ethcore.io"
license = "GPL-3.0"
name = "ethcore-util"
version = "1.3.1"
version = "1.3.2"
authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs"

@@ -110,8 +110,8 @@ impl<Socket: GenericSocket> GenericConnection<Socket> {
    }
    if !self.interest.is_writable() {
        self.interest.insert(EventSet::writable());
        io.update_registration(self.token).ok();
    }
    io.update_registration(self.token).ok();
}

/// Check if this connection has data to be sent.

@@ -77,11 +77,11 @@ pub fn clean_0x(s: &str) -> &str {

macro_rules! impl_hash {
    ($from: ident, $size: expr) => {
        #[derive(Eq)]
        #[repr(C)]
        /// Unformatted binary data of fixed length.
        pub struct $from (pub [u8; $size]);

        impl From<[u8; $size]> for $from {
            fn from(bytes: [u8; $size]) -> Self {
                $from(bytes)
@@ -263,6 +263,8 @@ macro_rules! impl_hash {
            }
        }

        impl Eq for $from {}

        impl PartialEq for $from {
            fn eq(&self, other: &Self) -> bool {
                for i in 0..$size {

@@ -102,6 +102,12 @@ impl From<::std::io::Error> for Error {
    }
}

impl From<String> for Error {
    fn from(e: String) -> Self {
        Error::Custom(e)
    }
}

/// A generalized migration from the given db to a destination db.
pub trait Migration: 'static {
    /// Number of columns in database after the migration.

@@ -174,6 +174,8 @@ pub enum FromBytesError {
    DataIsTooLong,
    /// Integer representation is non-canonically prefixed with zero byte(s).
    ZeroPrefixedInt,
    /// String representation is not utf-8.
    InvalidUtf8,
}

impl StdError for FromBytesError {
@@ -199,7 +201,7 @@ pub trait FromBytes: Sized {

impl FromBytes for String {
    fn from_bytes(bytes: &[u8]) -> FromBytesResult<String> {
        Ok(::std::str::from_utf8(bytes).unwrap().to_owned())
        ::std::str::from_utf8(bytes).map(|s| s.to_owned()).map_err(|_| FromBytesError::InvalidUtf8)
    }
}