Merge branch 'master' into on-demand-priority

This commit is contained in:
Robert Habermeier 2017-05-10 12:18:30 +02:00
commit 0fd3e36c23
416 changed files with 24801 additions and 4853 deletions

4
.gitignore vendored
View File

@ -19,6 +19,10 @@
# mac stuff # mac stuff
.DS_Store .DS_Store
# npm stuff
npm-debug.log
node_modules
# gdb files # gdb files
.gdb_history .gdb_history

View File

@ -42,7 +42,7 @@ linux-stable:
- md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5" - md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5"
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity.md5 --body parity.md5
@ -60,9 +60,27 @@ linux-stable:
- target/release/ethstore - target/release/ethstore
- target/release/ethkey - target/release/ethkey
name: "stable-x86_64-unknown-linux-gnu_parity" name: "stable-x86_64-unknown-linux-gnu_parity"
linux-snap:
stage: build
image: parity/snapcraft:gitlab-ci
only:
- snap
- beta
script:
- rm -rf *snap
- cd scripts
- snapcraft
tags:
- rust
- rust-stable
artifacts:
paths:
- scripts/parity_master_amd64.snap
name: "stable-x86_64-unknown-linux-gnu_parity-snap"
allow_failure: true
linux-stable-debian: linux-stable-debian:
stage: build stage: build
image: ethcore/rust-debian:latest image: parity/rust-debian:gitlab-ci
only: only:
- beta - beta
- tags - tags
@ -89,7 +107,7 @@ linux-stable-debian:
- md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5" - md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5"
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/parity --body target/release/parity - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/parity --body target/release/parity
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/parity.md5 --body parity.md5
@ -146,7 +164,7 @@ linux-nightly:
allow_failure: true allow_failure: true
linux-centos: linux-centos:
stage: build stage: build
image: ethcore/rust-centos:latest image: parity/rust-centos:gitlab-ci
only: only:
- beta - beta
- tags - tags
@ -162,7 +180,7 @@ linux-centos:
- export SHA3=$(target/release/parity tools hash target/release/parity) - export SHA3=$(target/release/parity tools hash target/release/parity)
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity.md5 --body parity.md5 - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity.md5 --body parity.md5
@ -177,7 +195,7 @@ linux-centos:
name: "x86_64-unknown-centos-gnu_parity" name: "x86_64-unknown-centos-gnu_parity"
linux-i686: linux-i686:
stage: build stage: build
image: ethcore/rust-i686:latest image: parity/rust-i686:gitlab-ci
only: only:
- beta - beta
- tags - tags
@ -199,7 +217,7 @@ linux-i686:
- md5sum "parity_"$VER"_i386.deb" > "parity_"$VER"_i386.deb.md5" - md5sum "parity_"$VER"_i386.deb" > "parity_"$VER"_i386.deb.md5"
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
@ -217,7 +235,7 @@ linux-i686:
allow_failure: true allow_failure: true
linux-armv7: linux-armv7:
stage: build stage: build
image: ethcore/rust-armv7:latest image: parity/rust-armv7:gitlab-ci
only: only:
- beta - beta
- tags - tags
@ -236,7 +254,7 @@ linux-armv7:
- cat .cargo/config - cat .cargo/config
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS - cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
- arm-linux-gnueabihf-strip target/$PLATFORM/release/parity - arm-linux-gnueabihf-strip target/$PLATFORM/release/parity
- export SHA3=$(rhash --sha3-256 ~/Core/parity/target/release/parity -p %h) - export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
- md5sum target/$PLATFORM/release/parity > parity.md5 - md5sum target/$PLATFORM/release/parity > parity.md5
- sh scripts/deb-build.sh armhf - sh scripts/deb-build.sh armhf
- cp target/$PLATFORM/release/parity deb/usr/bin/parity - cp target/$PLATFORM/release/parity deb/usr/bin/parity
@ -245,7 +263,7 @@ linux-armv7:
- md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5" - md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5"
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
@ -263,7 +281,7 @@ linux-armv7:
allow_failure: true allow_failure: true
linux-arm: linux-arm:
stage: build stage: build
image: ethcore/rust-arm:latest image: parity/rust-arm:gitlab-ci
only: only:
- beta - beta
- tags - tags
@ -282,7 +300,7 @@ linux-arm:
- cat .cargo/config - cat .cargo/config
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS - cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
- arm-linux-gnueabihf-strip target/$PLATFORM/release/parity - arm-linux-gnueabihf-strip target/$PLATFORM/release/parity
- export SHA3=$(rhash --sha3-256 ~/Core/parity/target/release/parity -p %h) - export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
- md5sum target/$PLATFORM/release/parity > parity.md5 - md5sum target/$PLATFORM/release/parity > parity.md5
- sh scripts/deb-build.sh armhf - sh scripts/deb-build.sh armhf
- cp target/$PLATFORM/release/parity deb/usr/bin/parity - cp target/$PLATFORM/release/parity deb/usr/bin/parity
@ -291,7 +309,7 @@ linux-arm:
- md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5" - md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5"
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
@ -328,11 +346,11 @@ linux-armv6:
- cat .cargo/config - cat .cargo/config
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS - cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
- arm-linux-gnueabi-strip target/$PLATFORM/release/parity - arm-linux-gnueabi-strip target/$PLATFORM/release/parity
- export SHA3=$(rhash --sha3-256 ~/Core/parity/target/release/parity -p %h) - export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
- md5sum target/$PLATFORM/release/parity > parity.md5 - md5sum target/$PLATFORM/release/parity > parity.md5
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
@ -348,7 +366,7 @@ linux-armv6:
allow_failure: true allow_failure: true
linux-aarch64: linux-aarch64:
stage: build stage: build
image: ethcore/rust-aarch64:latest image: parity/rust-arm64:gitlab-ci
only: only:
- beta - beta
- tags - tags
@ -367,7 +385,7 @@ linux-aarch64:
- cat .cargo/config - cat .cargo/config
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS - cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
- aarch64-linux-gnu-strip target/$PLATFORM/release/parity - aarch64-linux-gnu-strip target/$PLATFORM/release/parity
- export SHA3=$(rhash --sha3-256 ~/Core/parity/target/release/parity -p %h) - export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
- md5sum target/$PLATFORM/release/parity > parity.md5 - md5sum target/$PLATFORM/release/parity > parity.md5
- sh scripts/deb-build.sh arm64 - sh scripts/deb-build.sh arm64
- cp target/$PLATFORM/release/parity deb/usr/bin/parity - cp target/$PLATFORM/release/parity deb/usr/bin/parity
@ -376,7 +394,7 @@ linux-aarch64:
- md5sum "parity_"$VER"_arm64.deb" > "parity_"$VER"_arm64.deb.md5" - md5sum "parity_"$VER"_arm64.deb" > "parity_"$VER"_arm64.deb.md5"
- aws configure set aws_access_key_id $s3_key - aws configure set aws_access_key_id $s3_key
- aws configure set aws_secret_access_key $s3_secret - aws configure set aws_secret_access_key $s3_secret
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi - if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM - aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5 - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_arm64.deb" --body "parity_"$VER"_arm64.deb" - aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_arm64.deb" --body "parity_"$VER"_arm64.deb"
@ -416,7 +434,7 @@ darwin:
md5sum "parity-"$VER"-macos-installer.pkg" >> "parity-"$VER"-macos-installer.pkg.md5" md5sum "parity-"$VER"-macos-installer.pkg" >> "parity-"$VER"-macos-installer.pkg.md5"
aws configure set aws_access_key_id $s3_key aws configure set aws_access_key_id $s3_key
aws configure set aws_secret_access_key $s3_secret aws configure set aws_secret_access_key $s3_secret
if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/release/parity aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/release/parity
aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5 aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
@ -470,9 +488,10 @@ windows:
- aws configure set aws_access_key_id %s3_key% - aws configure set aws_access_key_id %s3_key%
- aws configure set aws_secret_access_key %s3_secret% - aws configure set aws_secret_access_key %s3_secret%
- echo %CI_BUILD_REF_NAME% - echo %CI_BUILD_REF_NAME%
- echo %CI_BUILD_REF_NAME% | findstr /R "master" >nul 2>&1 && set S3_BUCKET=builds-parity-published || set S3_BUCKET=builds-parity - echo %CI_BUILD_REF_NAME% | findstr /R "master" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
- echo %CI_BUILD_REF_NAME% | findstr /R "beta" >nul 2>&1 && set S3_BUCKET=builds-parity-published || set S3_BUCKET=builds-parity - echo %CI_BUILD_REF_NAME% | findstr /R "beta" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
- echo %CI_BUILD_REF_NAME% | findstr /R "stable" >nul 2>&1 && set S3_BUCKET=builds-parity-published || set S3_BUCKET=builds-parity - echo %CI_BUILD_REF_NAME% | findstr /R "stable" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
- echo %CI_BUILD_REF_NAME% | findstr /R "nightly" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
- echo %S3_BUCKET% - echo %S3_BUCKET%
- aws s3 rm --recursive s3://%S3_BUCKET%/%CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc - aws s3 rm --recursive s3://%S3_BUCKET%/%CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.exe --body target\release\parity.exe - aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.exe --body target\release\parity.exe
@ -503,7 +522,11 @@ docker-build:
script: script:
- if [ "$CI_BUILD_REF_NAME" == "beta-release" ]; then DOCKER_TAG="latest"; else DOCKER_TAG=$CI_BUILD_REF_NAME; fi - if [ "$CI_BUILD_REF_NAME" == "beta-release" ]; then DOCKER_TAG="latest"; else DOCKER_TAG=$CI_BUILD_REF_NAME; fi
- docker login -u $Docker_Hub_User -p $Docker_Hub_Pass - docker login -u $Docker_Hub_User -p $Docker_Hub_Pass
- sh scripts/docker-build.sh $DOCKER_TAG - sh scripts/docker-build.sh $DOCKER_TAG ethcore
- docker logout
- docker login -u $Docker_Hub_User_Parity -p $Docker_Hub_Pass_Parity
- sh scripts/docker-build.sh $DOCKER_TAG parity
- docker logout
tags: tags:
- docker - docker
test-coverage: test-coverage:
@ -541,7 +564,7 @@ test-windows:
- git submodule update --init --recursive - git submodule update --init --recursive
script: script:
- set RUST_BACKTRACE=1 - set RUST_BACKTRACE=1
- echo cargo test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p ethcore-dapps -p ethcore-rpc -p ethcore-signer -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity %CARGOFLAGS% --verbose --release - echo cargo test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p ethcore-dapps -p parity-rpc -p ethcore-signer -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity %CARGOFLAGS% --verbose --release
tags: tags:
- rust-windows - rust-windows
allow_failure: true allow_failure: true
@ -560,7 +583,7 @@ test-rust-stable:
- rust-stable - rust-stable
js-test: js-test:
stage: test stage: test
image: ethcore/rust:stable image: parity/rust:gitlab-ci
before_script: before_script:
- git submodule update --init --recursive - git submodule update --init --recursive
- export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l) - export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l)
@ -574,11 +597,12 @@ test-rust-beta:
stage: test stage: test
only: only:
- triggers - triggers
image: ethcore/rust:beta image: parity/rust:gitlab-ci
before_script: before_script:
- git submodule update --init --recursive - git submodule update --init --recursive
- export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l) - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
script: script:
- rustup default beta
- export RUST_BACKTRACE=1 - export RUST_BACKTRACE=1
- if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
tags: tags:
@ -589,11 +613,12 @@ test-rust-nightly:
stage: test stage: test
only: only:
- triggers - triggers
image: ethcore/rust:nightly image: parity/rust:gitlab-ci
before_script: before_script:
- git submodule update --init --recursive - git submodule update --init --recursive
- export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l) - export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^appveyor.yml -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
script: script:
- rustup default nightly
- export RUST_BACKTRACE=1 - export RUST_BACKTRACE=1
- if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi - if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
tags: tags:
@ -607,12 +632,13 @@ js-release:
- beta - beta
- stable - stable
- tags - tags
image: ethcore/rust:stable image: parity/rust:gitlab-ci
before_script: before_script:
- export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l) - export JS_FILES_MODIFIED=$(git --no-pager diff --name-only $CI_BUILD_REF^ $CI_BUILD_REF | grep ^js/ | wc -l)
- echo $JS_FILES_MODIFIED - echo $JS_FILES_MODIFIED
- if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi - if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi
script: script:
- rustup default stable
- echo $JS_FILES_MODIFIED - echo $JS_FILES_MODIFIED
- if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS rebuild since no JS files modified."; else ./js/scripts/build.sh && ./js/scripts/release.sh; fi - if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS rebuild since no JS files modified."; else ./js/scripts/build.sh && ./js/scripts/release.sh; fi
tags: tags:
@ -621,8 +647,9 @@ push-release:
stage: push-release stage: push-release
only: only:
- tags - tags
image: ethcore/rust:stable image: parity/rust:gitlab-ci
script: script:
- rustup default stable
- curl --data "secret=$RELEASES_SECRET" http://update.parity.io:1337/push-release/$CI_BUILD_REF_NAME/$CI_BUILD_REF - curl --data "secret=$RELEASES_SECRET" http://update.parity.io:1337/push-release/$CI_BUILD_REF_NAME/$CI_BUILD_REF
- curl --data "secret=$RELEASES_SECRET" http://update.parity.io:1338/push-release/$CI_BUILD_REF_NAME/$CI_BUILD_REF - curl --data "secret=$RELEASES_SECRET" http://update.parity.io:1338/push-release/$CI_BUILD_REF_NAME/$CI_BUILD_REF
tags: tags:

3842
CHANGELOG.md Normal file

File diff suppressed because it is too large Load Diff

243
Cargo.lock generated
View File

@ -18,7 +18,6 @@ dependencies = [
"ethcore-ipc-tests 0.1.0", "ethcore-ipc-tests 0.1.0",
"ethcore-light 1.7.0", "ethcore-light 1.7.0",
"ethcore-logger 1.7.0", "ethcore-logger 1.7.0",
"ethcore-rpc 1.7.0",
"ethcore-secretstore 1.0.0", "ethcore-secretstore 1.0.0",
"ethcore-signer 1.7.0", "ethcore-signer 1.7.0",
"ethcore-stratum 1.7.0", "ethcore-stratum 1.7.0",
@ -38,6 +37,7 @@ dependencies = [
"parity-ipfs-api 1.7.0", "parity-ipfs-api 1.7.0",
"parity-local-store 0.1.0", "parity-local-store 0.1.0",
"parity-reactor 0.1.0", "parity-reactor 0.1.0",
"parity-rpc 1.7.0",
"parity-rpc-client 1.4.0", "parity-rpc-client 1.4.0",
"parity-updater 1.7.0", "parity-updater 1.7.0",
"path 0.1.0", "path 0.1.0",
@ -181,8 +181,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "bn" name = "bn"
version = "0.4.3" version = "0.4.4"
source = "git+https://github.com/paritytech/bn#59d848e642ad1ff0d60e39348576a6f11ee123b8" source = "git+https://github.com/paritytech/bn#b97e95a45f4484a41a515338c4f0e093bf6675e0"
dependencies = [ dependencies = [
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
@ -201,7 +201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "bytes" name = "bytes"
version = "0.4.1" version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -339,8 +339,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "elastic-array" name = "elastic-array"
version = "0.6.0" version = "0.7.0"
source = "git+https://github.com/paritytech/elastic-array#346f1ba5982576dab9d0b8fa178b50e1db0a21cd" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -394,7 +394,7 @@ version = "1.7.0"
dependencies = [ dependencies = [
"bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bn 0.4.3 (git+https://github.com/paritytech/bn)", "bn 0.4.4 (git+https://github.com/paritytech/bn)",
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
@ -592,49 +592,6 @@ dependencies = [
"tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "ethcore-rpc"
version = "1.7.0"
dependencies = [
"cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.7.0",
"ethcore 1.7.0",
"ethcore-devtools 1.7.0",
"ethcore-io 1.7.0",
"ethcore-ipc 1.7.0",
"ethcore-light 1.7.0",
"ethcore-logger 1.7.0",
"ethcore-util 1.7.0",
"ethcrypto 0.1.0",
"ethjson 0.1.0",
"ethkey 0.2.0",
"ethstore 0.1.0",
"ethsync 1.7.0",
"fetch 0.1.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"multihash 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-reactor 0.1.0",
"parity-updater 1.7.0",
"rlp 0.1.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
"stats 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "ethcore-secretstore" name = "ethcore-secretstore"
version = "1.0.0" version = "1.0.0"
@ -646,6 +603,7 @@ dependencies = [
"ethcore-ipc 1.7.0", "ethcore-ipc 1.7.0",
"ethcore-ipc-codegen 1.7.0", "ethcore-ipc-codegen 1.7.0",
"ethcore-ipc-nano 1.7.0", "ethcore-ipc-nano 1.7.0",
"ethcore-logger 1.7.0",
"ethcore-util 1.7.0", "ethcore-util 1.7.0",
"ethcrypto 0.1.0", "ethcrypto 0.1.0",
"ethkey 0.2.0", "ethkey 0.2.0",
@ -660,7 +618,7 @@ dependencies = [
"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -674,12 +632,12 @@ dependencies = [
"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-devtools 1.7.0", "ethcore-devtools 1.7.0",
"ethcore-io 1.7.0", "ethcore-io 1.7.0",
"ethcore-rpc 1.7.0",
"ethcore-util 1.7.0", "ethcore-util 1.7.0",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-rpc 1.7.0",
"parity-ui 1.7.0", "parity-ui 1.7.0",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -713,7 +671,7 @@ version = "1.7.0"
dependencies = [ dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.6.0 (git+https://github.com/paritytech/elastic-array)", "elastic-array 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)", "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)",
"ethcore-bigint 0.1.2", "ethcore-bigint 0.1.2",
@ -1080,7 +1038,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "jsonrpc-core" name = "jsonrpc-core"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1092,7 +1050,7 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-http-server" name = "jsonrpc-http-server"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@ -1105,19 +1063,20 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-ipc-server" name = "jsonrpc-ipc-server"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)", "parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "jsonrpc-macros" name = "jsonrpc-macros"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@ -1127,7 +1086,7 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-minihttp-server" name = "jsonrpc-minihttp-server"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@ -1141,7 +1100,7 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-pubsub" name = "jsonrpc-pubsub"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1151,19 +1110,20 @@ dependencies = [
[[package]] [[package]]
name = "jsonrpc-server-utils" name = "jsonrpc-server-utils"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "jsonrpc-tcp-server" name = "jsonrpc-tcp-server"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [ dependencies = [
"bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1172,6 +1132,17 @@ dependencies = [
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "jsonrpc-ws-server"
version = "7.0.0"
source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#8ed20d6e094e88f707045fca2d0959f46bfd23f9"
dependencies = [
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"ws 0.6.0 (git+https://github.com/tomusdrw/ws-rs)",
]
[[package]] [[package]]
name = "kernel32-sys" name = "kernel32-sys"
version = "0.2.2" version = "0.2.2"
@ -1708,16 +1679,64 @@ dependencies = [
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "parity-rpc"
version = "1.7.0"
dependencies = [
"cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"ethash 1.7.0",
"ethcore 1.7.0",
"ethcore-devtools 1.7.0",
"ethcore-io 1.7.0",
"ethcore-ipc 1.7.0",
"ethcore-light 1.7.0",
"ethcore-logger 1.7.0",
"ethcore-util 1.7.0",
"ethcrypto 0.1.0",
"ethjson 0.1.0",
"ethkey 0.2.0",
"ethstore 0.1.0",
"ethsync 1.7.0",
"fetch 0.1.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"multihash 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-reactor 0.1.0",
"parity-updater 1.7.0",
"pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
"semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
"stats 0.1.0",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-timer 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "parity-rpc-client" name = "parity-rpc-client"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"ethcore-rpc 1.7.0",
"ethcore-signer 1.7.0", "ethcore-signer 1.7.0",
"ethcore-util 1.7.0", "ethcore-util 1.7.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-rpc 1.7.0",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1728,18 +1747,19 @@ dependencies = [
[[package]] [[package]]
name = "parity-tokio-ipc" name = "parity-tokio-ipc"
version = "0.1.0" version = "0.1.5"
source = "git+https://github.com/nikvolf/parity-tokio-ipc#3d4234de6bdc78688ef803935111003080fd5375" source = "git+https://github.com/nikvolf/parity-tokio-ipc#d6c5b3cfcc913a1b9cf0f0562a10b083ceb9fb7c"
dependencies = [ dependencies = [
"bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)", "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)", "tokio-named-pipes 0.1.0 (git+https://github.com/nikvolf/tokio-named-pipes)",
"tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -1761,7 +1781,7 @@ dependencies = [
[[package]] [[package]]
name = "parity-ui-precompiled" name = "parity-ui-precompiled"
version = "1.4.0" version = "1.4.0"
source = "git+https://github.com/paritytech/js-precompiled.git#fb346e5f2925d1b2d533eb986bd2cefb962c7a88" source = "git+https://github.com/paritytech/js-precompiled.git#cff0aec1877a4b75f51de3facee9fe439a41a90d"
dependencies = [ dependencies = [
"parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -2018,7 +2038,7 @@ name = "rlp"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.6.0 (git+https://github.com/paritytech/elastic-array)", "elastic-array 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore-bigint 0.1.2", "ethcore-bigint 0.1.2",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2027,7 +2047,7 @@ dependencies = [
[[package]] [[package]]
name = "rocksdb" name = "rocksdb"
version = "0.4.5" version = "0.4.5"
source = "git+https://github.com/paritytech/rust-rocksdb#8579e896a98cdeff086392236d411dd4aa141774" source = "git+https://github.com/paritytech/rust-rocksdb#acd192f6ee017a3e8be704958617349d20ee783b"
dependencies = [ dependencies = [
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
"rocksdb-sys 0.3.0 (git+https://github.com/paritytech/rust-rocksdb)", "rocksdb-sys 0.3.0 (git+https://github.com/paritytech/rust-rocksdb)",
@ -2036,7 +2056,7 @@ dependencies = [
[[package]] [[package]]
name = "rocksdb-sys" name = "rocksdb-sys"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/paritytech/rust-rocksdb#8579e896a98cdeff086392236d411dd4aa141774" source = "git+https://github.com/paritytech/rust-rocksdb#acd192f6ee017a3e8be704958617349d20ee783b"
dependencies = [ dependencies = [
"gcc 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.43 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2081,9 +2101,9 @@ name = "rpc-cli"
version = "1.4.0" version = "1.4.0"
dependencies = [ dependencies = [
"ethcore-bigint 0.1.2", "ethcore-bigint 0.1.2",
"ethcore-rpc 1.7.0",
"ethcore-util 1.7.0", "ethcore-util 1.7.0",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-rpc 1.7.0",
"parity-rpc-client 1.4.0", "parity-rpc-client 1.4.0",
"rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -2449,37 +2469,26 @@ name = "tokio-core"
version = "0.1.6" version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
name = "tokio-io" name = "tokio-io"
version = "0.1.0" version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "tokio-line"
version = "0.1.0"
source = "git+https://github.com/tokio-rs/tokio-line#482614ae0c82daf584727ae65a80d854fe861f81"
dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "tokio-minihttp" name = "tokio-minihttp"
version = "0.1.0" version = "0.1.0"
@ -2498,11 +2507,13 @@ dependencies = [
[[package]] [[package]]
name = "tokio-named-pipes" name = "tokio-named-pipes"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/alexcrichton/tokio-named-pipes#3a22f8fc9a441b548aec25bd5df3b1e0ab99fabe" source = "git+https://github.com/nikvolf/tokio-named-pipes#0b9b728eaeb0a6673c287ac7692be398fd651752"
dependencies = [ dependencies = [
"bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)", "mio-named-pipes 0.1.4 (git+https://github.com/alexcrichton/mio-named-pipes)",
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -2546,15 +2557,28 @@ dependencies = [
] ]
[[package]] [[package]]
name = "tokio-uds" name = "tokio-timer"
version = "0.1.2" version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [ dependencies = [
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tokio-uds"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]] [[package]]
@ -2700,7 +2724,7 @@ name = "ws"
version = "0.5.3" version = "0.5.3"
source = "git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7#30415c17f1bec53b2dcabae5b8b887df75dcbe34" source = "git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7#30415c17f1bec53b2dcabae5b8b887df75dcbe34"
dependencies = [ dependencies = [
"bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/paritytech/mio)", "mio 0.6.1 (git+https://github.com/paritytech/mio)",
@ -2710,6 +2734,21 @@ dependencies = [
"url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "ws"
version = "0.6.0"
source = "git+https://github.com/tomusdrw/ws-rs#3259e7ca906c848beae109eb32e492871f8f397d"
dependencies = [
"bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "ws2_32-sys" name = "ws2_32-sys"
version = "0.2.1" version = "0.2.1"
@ -2770,10 +2809,10 @@ dependencies = [
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" "checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum blastfig 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09640e0509d97d5cdff03a9f5daf087a8e04c735c3b113a75139634a19cfc7b2" "checksum blastfig 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09640e0509d97d5cdff03a9f5daf087a8e04c735c3b113a75139634a19cfc7b2"
"checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d" "checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d"
"checksum bn 0.4.3 (git+https://github.com/paritytech/bn)" = "<none>" "checksum bn 0.4.4 (git+https://github.com/paritytech/bn)" = "<none>"
"checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8" "checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8"
"checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27"
"checksum bytes 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "46112a0060ae15e3a3f9a445428a53e082b91215b744fa27a1948842f4a64b96" "checksum bytes 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f9edb851115d67d1f18680f9326901768a91d37875b87015518357c6ce22b553"
"checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
"checksum cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e53e6cdfa5ca294863e8c8a32a7cdb4dc0a442c8971d47a0e75b6c27ea268a6a" "checksum cid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e53e6cdfa5ca294863e8c8a32a7cdb4dc0a442c8971d47a0e75b6c27ea268a6a"
"checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32" "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32"
@ -2790,7 +2829,7 @@ dependencies = [
"checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8" "checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8"
"checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f"
"checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91" "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91"
"checksum elastic-array 0.6.0 (git+https://github.com/paritytech/elastic-array)" = "<none>" "checksum elastic-array 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71a64decd4b8cd06654a4e643c45cb558ad554abbffd82a7e16e34f45f51b605"
"checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83" "checksum env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e3856f1697098606fc6cb97a93de88ca3f3bc35bb878c725920e6e82ecf05e83"
"checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "<none>" "checksum eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)" = "<none>"
"checksum ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63df67d0af5e3cb906b667ca1a6e00baffbed87d0d8f5f78468a1f5eb3a66345" "checksum ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63df67d0af5e3cb906b667ca1a6e00baffbed87d0d8f5f78468a1f5eb3a66345"
@ -2824,6 +2863,7 @@ dependencies = [
"checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
"checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
"checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>" "checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
"checksum jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "<none>"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"
"checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f"
@ -2873,7 +2913,7 @@ dependencies = [
"checksum order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb" "checksum order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb"
"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
"checksum parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1d06f6ee0fda786df3784a96ee3f0629f529b91cbfb7d142f6410e6bcd1ce2c" "checksum parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1d06f6ee0fda786df3784a96ee3f0629f529b91cbfb7d142f6410e6bcd1ce2c"
"checksum parity-tokio-ipc 0.1.0 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "<none>" "checksum parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "<none>"
"checksum parity-ui-precompiled 1.4.0 (git+https://github.com/paritytech/js-precompiled.git)" = "<none>" "checksum parity-ui-precompiled 1.4.0 (git+https://github.com/paritytech/js-precompiled.git)" = "<none>"
"checksum parity-wordlist 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07779ab11d958acbee30fcf644c99d3fae132d8fcb41282a25e1ee284097bdd2" "checksum parity-wordlist 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07779ab11d958acbee30fcf644c99d3fae132d8fcb41282a25e1ee284097bdd2"
"checksum parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aebb68eebde2c99f89592d925288600fde220177e46b5c9a91ca218d245aeedf" "checksum parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aebb68eebde2c99f89592d925288600fde220177e46b5c9a91ca218d245aeedf"
@ -2952,14 +2992,14 @@ dependencies = [
"checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af" "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af"
"checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270" "checksum tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f7aef43048292ca0bae4ab32180e85f6202cf2816c2a210c396a84b99dab9270"
"checksum tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "99e958104a67877907c1454386d5482fe8e965a55d60be834a15a44328e7dc76" "checksum tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "99e958104a67877907c1454386d5482fe8e965a55d60be834a15a44328e7dc76"
"checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473" "checksum tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "48f55df1341bb92281f229a6030bc2abffde2c7a44c6d6b802b7687dd8be0775"
"checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "<none>"
"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "<none>" "checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "<none>"
"checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "<none>" "checksum tokio-named-pipes 0.1.0 (git+https://github.com/nikvolf/tokio-named-pipes)" = "<none>"
"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "<none>" "checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "<none>"
"checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808"
"checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162"
"checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a" "checksum tokio-timer 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "86f33def658c14724fc13ec6289b3875a8152ee8ae767a5b1ccbded363b03db8"
"checksum tokio-uds 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bd209039933255ea77c6d7a1d18abc20b997d161acb900acca6eb74cdd049f31"
"checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6" "checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6"
"checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72" "checksum toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a442dfc13508e603c3f763274361db7f79d7469a0e95c411cde53662ab30fc72"
"checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" "checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079"
@ -2981,6 +3021,7 @@ dependencies = [
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
"checksum ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7)" = "<none>" "checksum ws 0.5.3 (git+https://github.com/paritytech/ws-rs.git?branch=parity-1.7)" = "<none>"
"checksum ws 0.6.0 (git+https://github.com/tomusdrw/ws-rs)" = "<none>"
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
"checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453" "checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453"
"checksum xml-rs 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "65e74b96bd3179209dc70a980da6df843dff09e46eee103a0376c0949257e3ef" "checksum xml-rs 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "65e74b96bd3179209dc70a980da6df843dff09e46eee103a0376c0949257e3ef"

View File

@ -33,7 +33,6 @@ ethcore = { path = "ethcore" }
ethcore-util = { path = "util" } ethcore-util = { path = "util" }
ethcore-io = { path = "util/io" } ethcore-io = { path = "util/io" }
ethcore-devtools = { path = "devtools" } ethcore-devtools = { path = "devtools" }
ethcore-rpc = { path = "rpc" }
ethcore-signer = { path = "signer" } ethcore-signer = { path = "signer" }
ethcore-ipc = { path = "ipc/rpc" } ethcore-ipc = { path = "ipc/rpc" }
ethcore-ipc-nano = { path = "ipc/nano" } ethcore-ipc-nano = { path = "ipc/nano" }
@ -45,12 +44,13 @@ ethkey = { path = "ethkey" }
evmbin = { path = "evmbin" } evmbin = { path = "evmbin" }
rlp = { path = "util/rlp" } rlp = { path = "util/rlp" }
rpc-cli = { path = "rpc_cli" } rpc-cli = { path = "rpc_cli" }
parity-rpc-client = { path = "rpc_client" }
parity-hash-fetch = { path = "hash-fetch" } parity-hash-fetch = { path = "hash-fetch" }
parity-ipfs-api = { path = "ipfs" } parity-ipfs-api = { path = "ipfs" }
parity-updater = { path = "updater" }
parity-reactor = { path = "util/reactor" }
parity-local-store = { path = "local-store" } parity-local-store = { path = "local-store" }
parity-reactor = { path = "util/reactor" }
parity-rpc = { path = "rpc" }
parity-rpc-client = { path = "rpc_client" }
parity-updater = { path = "updater" }
path = { path = "util/path" } path = { path = "util/path" }
parity-dapps = { path = "dapps", optional = true } parity-dapps = { path = "dapps", optional = true }
@ -85,7 +85,7 @@ ui-precompiled = [
dapps = ["parity-dapps"] dapps = ["parity-dapps"]
ipc = ["ethcore/ipc", "ethsync/ipc"] ipc = ["ethcore/ipc", "ethsync/ipc"]
jit = ["ethcore/jit"] jit = ["ethcore/jit"]
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "parity-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"]
json-tests = ["ethcore/json-tests"] json-tests = ["ethcore/json-tests"]
test-heavy = ["ethcore/test-heavy"] test-heavy = ["ethcore/test-heavy"]
ethkey-cli = ["ethcore/ethkey-cli"] ethkey-cli = ["ethcore/ethkey-cli"]

View File

@ -1,9 +1,9 @@
# [Parity](https://ethcore.io/parity.html) # [Parity](https://parity.io/parity.html)
### Fast, light, and robust Ethereum implementation ### Fast, light, and robust Ethereum implementation
### [Download latest release](https://github.com/paritytech/parity/releases) ### [Download latest release](https://github.com/paritytech/parity/releases)
[![build status](https://gitlab.ethcore.io/parity/parity/badges/master/build.svg)](https://gitlab.ethcore.io/parity/parity/commits/master) [![Coverage Status][coveralls-image]][coveralls-url] [![GPLv3][license-image]][license-url] [![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) [![Coverage Status][coveralls-image]][coveralls-url] [![GPLv3][license-image]][license-url]
### Join the chat! ### Join the chat!
@ -33,7 +33,7 @@ Be sure to check out [our wiki][wiki-url] for more information.
Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and
cutting-edge Rust programming language. Parity is licensed under the GPLv3, and can be used for all your Ethereum needs. cutting-edge Rust programming language. Parity is licensed under the GPLv3, and can be used for all your Ethereum needs.
Parity comes with a built-in wallet. To access [Parity Wallet](http://127.0.0.1:8080/) simply go to http://127.0.0.1:8080/. It Parity comes with a built-in wallet. To access [Parity Wallet](http://web3.site/) simply go to http://web3.site/ (if you don't have access to the internet, but still want to use the service, you can also use http://127.0.0.1:8180/). It
includes various functionality allowing you to: includes various functionality allowing you to:
- create and manage your Ethereum accounts; - create and manage your Ethereum accounts;
- manage your Ether and any Ethereum tokens; - manage your Ether and any Ethereum tokens;

View File

@ -17,7 +17,7 @@
use std::sync::Arc; use std::sync::Arc;
use hyper; use hyper;
use ethcore_rpc::{Metadata, Origin}; use parity_rpc::{Metadata, Origin};
use jsonrpc_core::{Middleware, MetaIoHandler}; use jsonrpc_core::{Middleware, MetaIoHandler};
use jsonrpc_http_server::{self as http, AccessControlAllowOrigin, HttpMetaExtractor}; use jsonrpc_http_server::{self as http, AccessControlAllowOrigin, HttpMetaExtractor};
use jsonrpc_http_server::tokio_core::reactor::Remote; use jsonrpc_http_server::tokio_core::reactor::Remote;

View File

@ -6,7 +6,7 @@ RUN yum -y update&& \
# install rustup # install rustup
RUN curl -sSf https://static.rust-lang.org/rustup.sh -o rustup.sh &&\ RUN curl -sSf https://static.rust-lang.org/rustup.sh -o rustup.sh &&\
ls&&\ ls&&\
sh rustup.sh -s -- --disable-sudo sh rustup.sh --disable-sudo
# show backtraces # show backtraces
ENV RUST_BACKTRACE 1 ENV RUST_BACKTRACE 1
# set compiler # set compiler

View File

@ -123,7 +123,7 @@ impl Light {
return Err(io::Error::new(io::ErrorKind::Other, "Cache file size mismatch")); return Err(io::Error::new(io::ErrorKind::Other, "Cache file size mismatch"));
} }
let num_nodes = cache_size / NODE_BYTES; let num_nodes = cache_size / NODE_BYTES;
let mut nodes: Vec<Node> = Vec::new(); let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
nodes.resize(num_nodes, unsafe { mem::uninitialized() }); nodes.resize(num_nodes, unsafe { mem::uninitialized() });
let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) }; let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) };
file.read_exact(buf)?; file.read_exact(buf)?;
@ -342,7 +342,6 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
} }
fn light_new(block_number: u64) -> Light { fn light_new(block_number: u64) -> Light {
let seed_compute = SeedHashCompute::new(); let seed_compute = SeedHashCompute::new();
let seedhash = seed_compute.get_seedhash(block_number); let seedhash = seed_compute.get_seedhash(block_number);
let cache_size = get_cache_size(block_number); let cache_size = get_cache_size(block_number);

View File

@ -106,6 +106,9 @@ pub trait LightChainClient: Send + Sync {
/// Get the `i`th CHT root. /// Get the `i`th CHT root.
fn cht_root(&self, i: usize) -> Option<H256>; fn cht_root(&self, i: usize) -> Option<H256>;
/// Get the EIP-86 transition block number.
fn eip86_transition(&self) -> u64;
} }
/// Something which can be treated as a `LightChainClient`. /// Something which can be treated as a `LightChainClient`.
@ -384,4 +387,8 @@ impl LightChainClient for Client {
fn cht_root(&self, i: usize) -> Option<H256> { fn cht_root(&self, i: usize) -> Option<H256> {
Client::cht_root(self, i) Client::cht_root(self, i)
} }
fn eip86_transition(&self) -> u64 {
self.engine().params().eip86_transition
}
} }

View File

@ -258,7 +258,7 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
}.fake_sign(req.from); }.fake_sign(req.from);
self.prove_transaction(transaction, id) self.prove_transaction(transaction, id)
.map(|proof| ::request::ExecutionResponse { items: proof }) .map(|(_, proof)| ::request::ExecutionResponse { items: proof })
} }
fn ready_transactions(&self) -> Vec<PendingTransaction> { fn ready_transactions(&self) -> Vec<PendingTransaction> {

View File

@ -22,9 +22,17 @@ use std::io::Write;
// TODO: `include!` these from files where they're pretty-printed? // TODO: `include!` these from files where they're pretty-printed?
const REGISTRY_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}]
,"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"boo
l"}],"payable":false,"type":"function"}]"#; const REGISTRY_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","
outputs":[{"name":"reserved","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddre
ss","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"}]"#;
const SERVICE_TRANSACTION_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}]"#; const SERVICE_TRANSACTION_ABI: &'static str = 
r#"[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}]"#;
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"user","type":"address"},{"name":"document","type":"bytes32"}],"name":"checkPermissions","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}]"#; const SECRETSTORE_ACL_STORAGE_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"user","type":"address"},{"name":"document","type":"bytes32"}],"name":"checkPermissions","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}]"#;
// be very careful changing these: ensure `ethcore/engines` validator sets have corresponding
// changes.
const VALIDATOR_SET_ABI: &'static str = r#"[{"constant":true,"inputs":[],"name":"transitionNonce","outputs":[{"name":"nonce","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"validators","type":"address[]"}],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_parent_hash","type":"bytes32"},{"indexed":true,"name":"_nonce","type":"uint256"},{"indexed":false,"name":"_new_set","type":"address[]"}],"name":"ValidatorsChanged","type":"event"}]"#;
const VALIDATOR_REPORT_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"validator","type":"address"},{"name":"blockNumber","type":"uint256"},{"name":"proof","type":"bytes"}],"name":"reportMalicious","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"validator","type":"address"},{"name":"blockNumber","type":"uint256"}],"name":"reportBenign","outputs":[],"payable":false,"type":"function"}]"#;
fn build_file(name: &str, abi: &str, filename: &str) { fn build_file(name: &str, abi: &str, filename: &str) {
let code = ::native_contract_generator::generate_module(name, abi).unwrap(); let code = ::native_contract_generator::generate_module(name, abi).unwrap();
@ -39,4 +47,6 @@ fn main() {
build_file("Registry", REGISTRY_ABI, "registry.rs"); build_file("Registry", REGISTRY_ABI, "registry.rs");
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs"); build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
} }

View File

@ -46,8 +46,8 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
Ok(format!(r##" Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}}; use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, BoxFuture}}; use futures::{{future, Future, IntoFuture, BoxFuture}};
use ethabi::{{Contract, Interface, Token}}; use ethabi::{{Contract, Interface, Token, Event}};
use util::{{self, Uint}}; use util::{{self, Uint}};
pub struct {name} {{ pub struct {name} {{
@ -70,6 +70,11 @@ impl {name} {{
}} }}
}} }}
/// Access the underlying `ethabi` contract.
pub fn contract(this: &Self) -> &Contract {{
&this.contract
}}
{functions} {functions}
}} }}
"##, "##,
@ -99,7 +104,10 @@ fn generate_functions(contract: &Contract) -> Result<String, Error> {
/// Inputs: {abi_inputs:?} /// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?} /// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String> pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where F: Fn(util::Address, Vec<u8>) -> U, U: Future<Item=Vec<u8>, Error=String> + Send + 'static where
F: Fn(util::Address, Vec<u8>) -> U,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send + 'static
{{ {{
let function = self.contract.function(r#"{abi_name}"#.to_string()) let function = self.contract.function(r#"{abi_name}"#.to_string())
.expect("function existence checked at compile-time; qed"); .expect("function existence checked at compile-time; qed");
@ -111,6 +119,7 @@ pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type},
}}; }};
call_future call_future
.into_future()
.and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e))) .and_then(move |out| function.decode_output(out).map_err(|e| format!("{{:?}}", e)))
.map(::std::collections::VecDeque::from) .map(::std::collections::VecDeque::from)
.and_then(|mut outputs| {decode_outputs}) .and_then(|mut outputs| {decode_outputs})
@ -299,10 +308,10 @@ fn detokenize(name: &str, output_type: ParamType) -> String {
ParamType::Bool => format!("{}.to_bool()", name), ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name), ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => { ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>()", let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind)); detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {})", format!("{}.to_array().and_then(|x| {{ {} }})",
name, read_array) name, read_array)
} }
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.") ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")

View File

@ -26,7 +26,11 @@ extern crate ethcore_util as util;
mod registry; mod registry;
mod service_transaction; mod service_transaction;
mod secretstore_acl_storage; mod secretstore_acl_storage;
mod validator_set;
mod validator_report;
pub use self::registry::Registry; pub use self::registry::Registry;
pub use self::service_transaction::ServiceTransactionChecker; pub use self::service_transaction::ServiceTransactionChecker;
pub use self::secretstore_acl_storage::SecretStoreAclStorage; pub use self::secretstore_acl_storage::SecretStoreAclStorage;
pub use self::validator_set::ValidatorSet;
pub use self::validator_report::ValidatorReport;

View File

@ -0,0 +1,22 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports)]
//! Validator reporting.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/validator_report.rs"));

View File

@ -0,0 +1,22 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports)]
//! Validator set contract.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/validator_set.rs"));

View File

@ -30,7 +30,8 @@
"chainID": "0x3d", "chainID": "0x3d",
"forkBlock": "0x1d4c00", "forkBlock": "0x1d4c00",
"forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f", "forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -24,7 +24,8 @@
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffffff" "eip98Transition": "0x7fffffffffffffff",
"eip86Transition": "0x7fffffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -24,7 +24,8 @@
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffffff" "eip98Transition": "0x7fffffffffffffff",
"eip86Transition": "0x7fffffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -30,7 +30,8 @@
"networkID": "0x1", "networkID": "0x1",
"chainID": "0x2", "chainID": "0x2",
"subprotocolName": "exp", "subprotocolName": "exp",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -147,7 +147,8 @@
"networkID" : "0x1", "networkID" : "0x1",
"forkBlock": "0x1d4c00", "forkBlock": "0x1d4c00",
"forkCanonHash": "0x4985f5ca3d2afbec36529aa96f74de3cc10a2a4a6c44f2157a57d2c6059a11bb", "forkCanonHash": "0x4985f5ca3d2afbec36529aa96f74de3cc10a2a4a6c44f2157a57d2c6059a11bb",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {
@ -192,6 +193,7 @@
"0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": "0x7fffffffffffff", "pricing": { "modexp": { "divisor": 20 } } } }, "0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": "0x7fffffffffffff", "pricing": { "modexp": { "divisor": 20 } } } },
"0000000000000000000000000000000000000006": { "builtin": { "name": "bn128_add", "activate_at": "0x7fffffffffffff", "pricing": { "linear": { "base": 999999, "word": 0 } } } }, "0000000000000000000000000000000000000006": { "builtin": { "name": "bn128_add", "activate_at": "0x7fffffffffffff", "pricing": { "linear": { "base": 999999, "word": 0 } } } },
"0000000000000000000000000000000000000007": { "builtin": { "name": "bn128_mul", "activate_at": "0x7fffffffffffff", "pricing": { "linear": { "base": 999999, "word": 0 } } } }, "0000000000000000000000000000000000000007": { "builtin": { "name": "bn128_mul", "activate_at": "0x7fffffffffffff", "pricing": { "linear": { "base": 999999, "word": 0 } } } },
"0000000000000000000000000000000000000008": { "builtin": { "name": "bn128_pairing", "activate_at": "0x7fffffffffffff", "pricing": { "linear": { "base": 999999, "word": 0 } } } },
"3282791d6fd713f1e94f4bfd565eaa78b3a0599d": { "3282791d6fd713f1e94f4bfd565eaa78b3a0599d": {
"balance": "1337000000000000000000" "balance": "1337000000000000000000"
}, },

View File

@ -143,7 +143,8 @@
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -23,7 +23,8 @@
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -23,7 +23,8 @@
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1", "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -30,7 +30,8 @@
"chainID": "0x3e", "chainID": "0x3e",
"forkBlock": "0x1b34d8", "forkBlock": "0x1b34d8",
"forkCanonHash": "0xf376243aeff1f256d970714c3de9fd78fa4e63cf63e32a51fe1169e375d98145", "forkCanonHash": "0xf376243aeff1f256d970714c3de9fd78fa4e63cf63e32a51fe1169e375d98145",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -23,7 +23,8 @@
"maximumExtraDataSize": "0x0400", "maximumExtraDataSize": "0x0400",
"minGasLimit": "125000", "minGasLimit": "125000",
"networkID" : "0x0", "networkID" : "0x0",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -27,7 +27,8 @@
"networkID" : "0x3", "networkID" : "0x3",
"forkBlock": 641350, "forkBlock": 641350,
"forkCanonHash": "0x8033403e9fe5811a7b6d6b469905915de1c59207ce2172cbcf5d6ff14fa6a2eb", "forkCanonHash": "0x8033403e9fe5811a7b6d6b469905915de1c59207ce2172cbcf5d6ff14fa6a2eb",
"eip98Transition": "0x7fffffffffffff" "eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

@ -1 +1 @@
Subproject commit d520593078fa0849dcd1f907e44ed0a616892e33 Subproject commit ef191fdc61cf76cdb9cdc147465fb447304b0ed2

View File

@ -139,11 +139,12 @@
} }
}, },
"params": { "params": {
"eip98Transition": "0x7fffffffffffffff",
"accountStartNonce": "0x00", "accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x1" "networkID" : "0x1",
"eip98Transition": "0x7fffffffffffff",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -7,7 +7,8 @@
"accountStartNonce": "0x0", "accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20", "maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388", "minGasLimit": "0x1388",
"networkID" : "0x2" "networkID" : "0x2",
"eip86Transition": "0x7fffffffffffff"
}, },
"genesis": { "genesis": {
"seal": { "seal": {

View File

@ -36,7 +36,7 @@
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"0000000000000000000000000000000000000005": { "0000000000000000000000000000000000000005": {
"balance": "1", "balance": "1",
"constructor": "6060604052604060405190810160405280737d577a597b2742b498cb5cf0c26cdcd726d39e6e73ffffffffffffffffffffffffffffffffffffffff1681526020017382a978b3f5962a5b0957d9ee9eef472ee55b42f173ffffffffffffffffffffffffffffffffffffffff1681525060009060028280548282559060005260206000209081019282156100ec579160200282015b828111156100eb5782518260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555091602001919060010190610093565b5b50905061012f91905b8082111561012b57600081816101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506001016100f5565b5090565b505034610000575b6000600090505b6000805490508110156101d5578060016000600084815481101561000057906000526020600020900160005b9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505b808060010191505061013e565b5b505b6105f2806101e76000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806335aa2e441461007b5780634d238c8e146100d8578063b7ab4db51461010b578063bfc708a01461017d578063d8f2e0bf146101b0578063fd6e1b50146101ff575b610000565b34610000576100966004808035906020019091905050610232565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3461000057610109600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061026f565b005b346100005761011861030f565b604051808060200182810382528381815181526020019150805190602001906020028083836000831461016a575b80518252602083111561016a57602082019150602081019050602083039250610146565b5050509050019250505060405180910390f35b34610000576101ae600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506103ad565b005b34610000576101bd61055b565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019150506040
5180910390f35b3461000057610230600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610581565b005b600081815481101561000057906000526020600020900160005b915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600080548060010182818154818355818115116102b8578183600052602060002091820191016102b791905b808211156102b357600081600090555060010161029b565b5090565b5b505050916000526020600020900160005b83909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505b50565b602060405190810160405280600081525060008054806020026020016040519081016040528092919081815260200182805480156103a257602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311610358575b505050505090505b90565b6000600160008054905003815481101561000057906000526020600020900160005b9054906101000a900473ffffffffffffffffffffffffffffffffffffffff166000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054815481101561000057906000526020600020900160005b6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600090556000600160008054905003815481101561000057906000526020600020900160005b6101000a81549073ffffffffffffffffffffffffffffffffffffffff021916905560008054809190600190038154818355818115116105535781836000526020600020918201910161055291905b8082111561054e576000816000905550600101610536565b5090565b5b505050505b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b80600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b505600a165627a7a7230582063a0123d8e8f5dde980af6b47e20acc5b7
a1acac3e3101fa1c933471ef4b405c0029" "constructor": "60a06040819052737d577a597b2742b498cb5cf0c26cdcd726d39e6e60609081527382a978b3f5962a5b0957d9ee9eef472ee55b42f1608052600080546002825581805290927f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639182019291905b828111156100a25782518254600160a060020a031916600160a060020a0390911617825560209092019160019091019061006d565b5b506100cd9291505b808211156100c9578054600160a060020a03191681556001016100ab565b5090565b505034610000575b60005b60005481101561012f578060016000600084815481101561000057906000526020600020900160005b9054600160a060020a036101009290920a90041681526020810191909152604001600020555b6001016100d8565b5b505b610453806101416000396000f3006060604052361561005c5763ffffffff60e060020a60003504166335aa2e4481146100615780634d238c8e1461008d578063b7ab4db5146100a8578063c476dd4014610110578063d69f13bb14610172578063d8f2e0bf14610190575b610000565b34610000576100716004356101b9565b60408051600160a060020a039092168252519081900360200190f35b34610000576100a6600160a060020a03600435166101e9565b005b34610000576100b5610260565b60408051602080825283518183015283519192839290830191858101910280838382156100fd575b8051825260208311156100fd57601f1990920191602091820191016100dd565b5050509050019250505060405180910390f35b3461000057604080516020600460443581810135601f81018490048402850184019095528484526100a6948235600160a060020a03169460248035956064949293919092019181908401838280828437509496506102ca95505050505050565b005b34610000576100a6600160a060020a03600435166024356103eb565b005b3461000057610071610418565b60408051600160a060020a039092168252519081900360200190f35b600081815481101561000057906000526020600020900160005b915054906101000a9004600160a060020a031681565b6000805480600101828181548183558181151161022b5760008381526020902061022b9181019083015b808211156102275760008155600101610213565b5090565b5b505050916000526020600020900160005b8154600160a060020a038086166101009390930a92830292021916179055505b50565b6040805160208181018352600080835280548451818402810184019095528085529293929091830182828015
6102bf57602002820191906000526020600020905b8154600160a060020a031681526001909101906020018083116102a1575b505050505090505b90565b6000805460001981019081101561000057906000526020600020900160005b9054906101000a9004600160a060020a031660006001600086600160a060020a0316600160a060020a0316815260200190815260200160002054815481101561000057906000526020600020900160005b8154600160a060020a039384166101009290920a918202918402191617905583166000908152600160205260408120819055805460001981019081101561000057906000526020600020900160005b6101000a815490600160a060020a03021916905560008054809190600190038154818355818115116103e0576000838152602090206103e09181019083015b808211156102275760008155600101610213565b5090565b5b505050505b505050565b6002805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0384161790555b5050565b600254600160a060020a0316815600a165627a7a72305820f7876e17abd5f0927fff16788b4b3c9028ed64e6db740d788b07fc5f0a8f10920029"
}, },
"0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }, "0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { "balance": "1606938044258990275541962092341162602522202993782792835301376" },
"0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { "balance": "1606938044258990275541962092341162602522202993782792835301376" } "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }

View File

@ -78,6 +78,7 @@ pub struct AccountDB<'db> {
impl<'db> AccountDB<'db> { impl<'db> AccountDB<'db> {
/// Create a new AccountDB from an address. /// Create a new AccountDB from an address.
#[cfg(test)]
pub fn new(db: &'db HashDB, address: &Address) -> Self { pub fn new(db: &'db HashDB, address: &Address) -> Self {
Self::from_hash(db, address.sha3()) Self::from_hash(db, address.sha3())
} }
@ -131,6 +132,7 @@ pub struct AccountDBMut<'db> {
impl<'db> AccountDBMut<'db> { impl<'db> AccountDBMut<'db> {
/// Create a new AccountDB from an address. /// Create a new AccountDB from an address.
#[cfg(test)]
pub fn new(db: &'db mut HashDB, address: &Address) -> Self { pub fn new(db: &'db mut HashDB, address: &Address) -> Self {
Self::from_hash(db, address.sha3()) Self::from_hash(db, address.sha3())
} }
@ -143,7 +145,7 @@ impl<'db> AccountDBMut<'db> {
} }
} }
#[allow(dead_code)] #[cfg(test)]
pub fn immutable(&'db self) -> AccountDB<'db> { pub fn immutable(&'db self) -> AccountDB<'db> {
AccountDB { db: self.db, address_hash: self.address_hash.clone() } AccountDB { db: self.db, address_hash: self.address_hash.clone() }
} }

View File

@ -484,7 +484,11 @@ impl LockedBlock {
/// Provide a valid seal in order to turn this into a `SealedBlock`. /// Provide a valid seal in order to turn this into a `SealedBlock`.
/// This does check the validity of `seal` with the engine. /// This does check the validity of `seal` with the engine.
/// Returns the `ClosedBlock` back again if the seal is no good. /// Returns the `ClosedBlock` back again if the seal is no good.
pub fn try_seal(self, engine: &Engine, seal: Vec<Bytes>) -> Result<SealedBlock, (Error, LockedBlock)> { pub fn try_seal(
self,
engine: &Engine,
seal: Vec<Bytes>,
) -> Result<SealedBlock, (Error, LockedBlock)> {
let mut s = self; let mut s = self;
s.block.header.set_seal(seal); s.block.header.set_seal(seal);
match engine.verify_block_seal(&s.block.header) { match engine.verify_block_seal(&s.block.header) {
@ -667,8 +671,7 @@ mod tests {
use spec::*; use spec::*;
let spec = Spec::new_test(); let spec = Spec::new_test();
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock(); let b = b.close_and_lock();
@ -682,16 +685,14 @@ mod tests {
let engine = &*spec.engine; let engine = &*spec.engine;
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap() let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
.close_and_lock().seal(engine, vec![]).unwrap(); .close_and_lock().seal(engine, vec![]).unwrap();
let orig_bytes = b.rlp_bytes(); let orig_bytes = b.rlp_bytes();
let orig_db = b.drain(); let orig_db = b.drain();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap();
assert_eq!(e.rlp_bytes(), orig_bytes); assert_eq!(e.rlp_bytes(), orig_bytes);
@ -708,8 +709,7 @@ mod tests {
let engine = &*spec.engine; let engine = &*spec.engine;
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let mut uncle1_header = Header::new(); let mut uncle1_header = Header::new();
@ -723,8 +723,7 @@ mod tests {
let orig_bytes = b.rlp_bytes(); let orig_bytes = b.rlp_bytes();
let orig_db = b.drain(); let orig_db = b.drain();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap();
let bytes = e.rlp_bytes(); let bytes = e.rlp_bytes();

View File

@ -419,6 +419,45 @@ impl<'a> Iterator for AncestryIter<'a> {
} }
} }
/// An iterator which walks all epoch transitions.
/// Returns epoch transitions.
pub struct EpochTransitionIter<'a> {
chain: &'a BlockChain,
prefix_iter: Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
}
impl<'a> Iterator for EpochTransitionIter<'a> {
type Item = (u64, EpochTransition);
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.prefix_iter.next() {
Some((key, val)) => {
// iterator may continue beyond values beginning with this
// prefix.
if !key.starts_with(&EPOCH_KEY_PREFIX[..]) { return None }
let transitions: EpochTransitions = ::rlp::decode(&val[..]);
// if there are multiple candidates, at most one will be on the
// canon chain.
for transition in transitions.candidates.into_iter() {
let is_in_canon_chain = self.chain.block_hash(transition.block_number)
.map_or(false, |hash| hash == transition.block_hash);
if is_in_canon_chain {
return Some((transitions.number, transition))
}
}
// some epochs never occurred on the main chain.
}
None => return None,
}
}
}
}
impl BlockChain { impl BlockChain {
/// Create new instance of blockchain from given Genesis. /// Create new instance of blockchain from given Genesis.
pub fn new(config: Config, genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain { pub fn new(config: Config, genesis: &[u8], db: Arc<KeyValueDB>) -> BlockChain {
@ -650,12 +689,19 @@ impl BlockChain {
/// ```json /// ```json
/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 } /// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
/// ``` /// ```
pub fn tree_route(&self, from: H256, to: H256) -> TreeRoute { ///
/// If the tree route verges into pruned or unknown blocks,
/// `None` is returned.
pub fn tree_route(&self, from: H256, to: H256) -> Option<TreeRoute> {
macro_rules! otry {
($e:expr) => { match $e { Some(x) => x, None => return None } }
}
let mut from_branch = vec![]; let mut from_branch = vec![];
let mut to_branch = vec![]; let mut to_branch = vec![];
let mut from_details = self.block_details(&from).unwrap_or_else(|| panic!("0. Expected to find details for block {:?}", from)); let mut from_details = otry!(self.block_details(&from));
let mut to_details = self.block_details(&to).unwrap_or_else(|| panic!("1. Expected to find details for block {:?}", to)); let mut to_details = otry!(self.block_details(&to));
let mut current_from = from; let mut current_from = from;
let mut current_to = to; let mut current_to = to;
@ -663,13 +709,13 @@ impl BlockChain {
while from_details.number > to_details.number { while from_details.number > to_details.number {
from_branch.push(current_from); from_branch.push(current_from);
current_from = from_details.parent.clone(); current_from = from_details.parent.clone();
from_details = self.block_details(&from_details.parent).unwrap_or_else(|| panic!("2. Expected to find details for block {:?}", from_details.parent)); from_details = otry!(self.block_details(&from_details.parent));
} }
while to_details.number > from_details.number { while to_details.number > from_details.number {
to_branch.push(current_to); to_branch.push(current_to);
current_to = to_details.parent.clone(); current_to = to_details.parent.clone();
to_details = self.block_details(&to_details.parent).unwrap_or_else(|| panic!("3. Expected to find details for block {:?}", to_details.parent)); to_details = otry!(self.block_details(&to_details.parent));
} }
assert_eq!(from_details.number, to_details.number); assert_eq!(from_details.number, to_details.number);
@ -678,22 +724,22 @@ impl BlockChain {
while current_from != current_to { while current_from != current_to {
from_branch.push(current_from); from_branch.push(current_from);
current_from = from_details.parent.clone(); current_from = from_details.parent.clone();
from_details = self.block_details(&from_details.parent).unwrap_or_else(|| panic!("4. Expected to find details for block {:?}", from_details.parent)); from_details = otry!(self.block_details(&from_details.parent));
to_branch.push(current_to); to_branch.push(current_to);
current_to = to_details.parent.clone(); current_to = to_details.parent.clone();
to_details = self.block_details(&to_details.parent).unwrap_or_else(|| panic!("5. Expected to find details for block {:?}", from_details.parent)); to_details = otry!(self.block_details(&to_details.parent));
} }
let index = from_branch.len(); let index = from_branch.len();
from_branch.extend(to_branch.into_iter().rev()); from_branch.extend(to_branch.into_iter().rev());
TreeRoute { Some(TreeRoute {
blocks: from_branch, blocks: from_branch,
ancestor: current_from, ancestor: current_from,
index: index index: index
} })
} }
/// Inserts a verified, known block from the canonical chain. /// Inserts a verified, known block from the canonical chain.
@ -797,6 +843,35 @@ impl BlockChain {
} }
} }
/// Insert an epoch transition. Provide an epoch number being transitioned to
/// and epoch transition object.
///
/// The block the transition occurred at should have already been inserted into the chain.
pub fn insert_epoch_transition(&self, batch: &mut DBTransaction, epoch_num: u64, transition: EpochTransition) {
let mut transitions = match self.db.read(db::COL_EXTRA, &epoch_num) {
Some(existing) => existing,
None => EpochTransitions {
number: epoch_num,
candidates: Vec::with_capacity(1),
}
};
// ensure we don't write any duplicates.
if transitions.candidates.iter().find(|c| c.block_hash == transition.block_hash).is_none() {
transitions.candidates.push(transition);
batch.write(db::COL_EXTRA, &epoch_num, &transitions);
}
}
/// Iterate over all epoch transitions.
pub fn epoch_transitions(&self) -> EpochTransitionIter {
let iter = self.db.iter_from_prefix(db::COL_EXTRA, &EPOCH_KEY_PREFIX[..]);
EpochTransitionIter {
chain: self,
prefix_iter: iter,
}
}
/// Add a child to a given block. Assumes that the block hash is in /// Add a child to a given block. Assumes that the block hash is in
/// the chain and the child's parent is this block. /// the chain and the child's parent is this block.
/// ///
@ -879,7 +954,8 @@ impl BlockChain {
// are moved to "canon chain" // are moved to "canon chain"
// find the route between old best block and the new one // find the route between old best block and the new one
let best_hash = self.best_block_hash(); let best_hash = self.best_block_hash();
let route = self.tree_route(best_hash, parent_hash); let route = self.tree_route(best_hash, parent_hash)
.expect("blocks being imported always within recent history; qed");
assert_eq!(number, parent_details.number + 1); assert_eq!(number, parent_details.number + 1);
@ -1711,52 +1787,52 @@ mod tests {
assert_eq!(bc.block_hash(3).unwrap(), b3a_hash); assert_eq!(bc.block_hash(3).unwrap(), b3a_hash);
// test trie route // test trie route
let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone()); let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone()).unwrap();
assert_eq!(r0_1.ancestor, genesis_hash); assert_eq!(r0_1.ancestor, genesis_hash);
assert_eq!(r0_1.blocks, [b1_hash.clone()]); assert_eq!(r0_1.blocks, [b1_hash.clone()]);
assert_eq!(r0_1.index, 0); assert_eq!(r0_1.index, 0);
let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone()); let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone()).unwrap();
assert_eq!(r0_2.ancestor, genesis_hash); assert_eq!(r0_2.ancestor, genesis_hash);
assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]); assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]);
assert_eq!(r0_2.index, 0); assert_eq!(r0_2.index, 0);
let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone()); let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone()).unwrap();
assert_eq!(r1_3a.ancestor, b1_hash); assert_eq!(r1_3a.ancestor, b1_hash);
assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]); assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]);
assert_eq!(r1_3a.index, 0); assert_eq!(r1_3a.index, 0);
let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone()); let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone()).unwrap();
assert_eq!(r1_3b.ancestor, b1_hash); assert_eq!(r1_3b.ancestor, b1_hash);
assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]); assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]);
assert_eq!(r1_3b.index, 0); assert_eq!(r1_3b.index, 0);
let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone()); let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone()).unwrap();
assert_eq!(r3a_3b.ancestor, b2_hash); assert_eq!(r3a_3b.ancestor, b2_hash);
assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]); assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]);
assert_eq!(r3a_3b.index, 1); assert_eq!(r3a_3b.index, 1);
let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone()); let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone()).unwrap();
assert_eq!(r1_0.ancestor, genesis_hash); assert_eq!(r1_0.ancestor, genesis_hash);
assert_eq!(r1_0.blocks, [b1_hash.clone()]); assert_eq!(r1_0.blocks, [b1_hash.clone()]);
assert_eq!(r1_0.index, 1); assert_eq!(r1_0.index, 1);
let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone()); let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone()).unwrap();
assert_eq!(r2_0.ancestor, genesis_hash); assert_eq!(r2_0.ancestor, genesis_hash);
assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]); assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]);
assert_eq!(r2_0.index, 2); assert_eq!(r2_0.index, 2);
let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone()); let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone()).unwrap();
assert_eq!(r3a_1.ancestor, b1_hash); assert_eq!(r3a_1.ancestor, b1_hash);
assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]); assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]);
assert_eq!(r3a_1.index, 2); assert_eq!(r3a_1.index, 2);
let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone()); let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone()).unwrap();
assert_eq!(r3b_1.ancestor, b1_hash); assert_eq!(r3b_1.ancestor, b1_hash);
assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]); assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]);
assert_eq!(r3b_1.index, 2); assert_eq!(r3b_1.index, 2);
let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone()); let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone()).unwrap();
assert_eq!(r3b_3a.ancestor, b2_hash); assert_eq!(r3b_3a.ancestor, b2_hash);
assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]); assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]);
assert_eq!(r3b_3a.index, 1); assert_eq!(r3b_3a.index, 1);
@ -1791,15 +1867,13 @@ mod tests {
#[test] #[test]
fn can_contain_arbitrary_block_sequence() { fn can_contain_arbitrary_block_sequence() {
let bc_result = generate_dummy_blockchain(50); let bc = generate_dummy_blockchain(50);
let bc = bc_result.reference();
assert_eq!(bc.best_block_number(), 49); assert_eq!(bc.best_block_number(), 49);
} }
#[test] #[test]
fn can_collect_garbage() { fn can_collect_garbage() {
let bc_result = generate_dummy_blockchain(3000); let bc = generate_dummy_blockchain(3000);
let bc = bc_result.reference();
assert_eq!(bc.best_block_number(), 2999); assert_eq!(bc.best_block_number(), 2999);
let best_hash = bc.best_block_hash(); let best_hash = bc.best_block_hash();
@ -1818,15 +1892,13 @@ mod tests {
#[test] #[test]
fn can_contain_arbitrary_block_sequence_with_extra() { fn can_contain_arbitrary_block_sequence_with_extra() {
let bc_result = generate_dummy_blockchain_with_extra(25); let bc = generate_dummy_blockchain_with_extra(25);
let bc = bc_result.reference();
assert_eq!(bc.best_block_number(), 24); assert_eq!(bc.best_block_number(), 24);
} }
#[test] #[test]
fn can_contain_only_genesis_block() { fn can_contain_only_genesis_block() {
let bc_result = generate_dummy_empty_blockchain(); let bc = generate_dummy_empty_blockchain();
let bc = bc_result.reference();
assert_eq!(bc.best_block_number(), 0); assert_eq!(bc.best_block_number(), 0);
} }
@ -2110,4 +2182,58 @@ mod tests {
assert_eq!(bc.rewind(), Some(genesis_hash.clone())); assert_eq!(bc.rewind(), Some(genesis_hash.clone()));
assert_eq!(bc.rewind(), None); assert_eq!(bc.rewind(), None);
} }
#[test]
fn epoch_transitions_iter() {
use blockchain::extras::EpochTransition;
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let db = new_db();
{
let bc = new_chain(&genesis, db.clone());
let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
let mut batch = db.transaction();
// create a longer fork
for i in 0..5 {
let canon_block = canon_chain.generate(&mut finalizer).unwrap();
let hash = BlockView::new(&canon_block).header_view().sha3();
bc.insert_block(&mut batch, &canon_block, vec![]);
bc.insert_epoch_transition(&mut batch, i, EpochTransition {
block_hash: hash,
block_number: i + 1,
proof: vec![],
state_proof: vec![],
});
bc.commit();
}
assert_eq!(bc.best_block_number(), 5);
let hash = BlockView::new(&uncle).header_view().sha3();
bc.insert_block(&mut batch, &uncle, vec![]);
bc.insert_epoch_transition(&mut batch, 999, EpochTransition {
block_hash: hash,
block_number: 1,
proof: vec![],
state_proof: vec![]
});
db.write(batch).unwrap();
bc.commit();
// epoch 999 not in canonical chain.
assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
// re-loading the blockchain should load the correct best block.
let bc = new_chain(&genesis, db);
assert_eq!(bc.best_block_number(), 5);
assert_eq!(bc.epoch_transitions().map(|(i, _)| i).collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
} }

View File

@ -18,6 +18,7 @@
use bloomchain; use bloomchain;
use util::*; use util::*;
use util::kvdb::PREFIX_LEN as DB_PREFIX_LEN;
use rlp::*; use rlp::*;
use header::BlockNumber; use header::BlockNumber;
use receipt::Receipt; use receipt::Receipt;
@ -37,6 +38,8 @@ pub enum ExtrasIndex {
BlocksBlooms = 3, BlocksBlooms = 3,
/// Block receipts index /// Block receipts index
BlockReceipts = 4, BlockReceipts = 4,
/// Epoch transition data index.
EpochTransitions = 5,
} }
fn with_index(hash: &H256, i: ExtrasIndex) -> H264 { fn with_index(hash: &H256, i: ExtrasIndex) -> H264 {
@ -134,6 +137,36 @@ impl Key<BlockReceipts> for H256 {
} }
} }
/// length of epoch keys.
pub const EPOCH_KEY_LEN: usize = DB_PREFIX_LEN + 16;
/// epoch key prefix.
/// used to iterate over all epoch transitions in order from genesis.
pub const EPOCH_KEY_PREFIX: &'static [u8; DB_PREFIX_LEN] = &[
ExtrasIndex::EpochTransitions as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
pub struct EpochTransitionsKey([u8; EPOCH_KEY_LEN]);
impl Deref for EpochTransitionsKey {
type Target = [u8];
fn deref(&self) -> &[u8] { &self.0[..] }
}
impl Key<EpochTransitions> for u64 {
type Target = EpochTransitionsKey;
fn key(&self) -> Self::Target {
let mut arr = [0u8; EPOCH_KEY_LEN];
arr[..DB_PREFIX_LEN].copy_from_slice(&EPOCH_KEY_PREFIX[..]);
write!(&mut arr[DB_PREFIX_LEN..], "{:016x}", self)
.expect("format arg is valid; no more than 16 chars will be written; qed");
EpochTransitionsKey(arr)
}
}
/// Familial details concerning a block /// Familial details concerning a block
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct BlockDetails { pub struct BlockDetails {
@ -144,7 +177,7 @@ pub struct BlockDetails {
/// Parent block hash /// Parent block hash
pub parent: H256, pub parent: H256,
/// List of children block hashes /// List of children block hashes
pub children: Vec<H256> pub children: Vec<H256>,
} }
impl HeapSizeOf for BlockDetails { impl HeapSizeOf for BlockDetails {
@ -241,6 +274,63 @@ impl HeapSizeOf for BlockReceipts {
} }
} }
/// Candidate transitions to an epoch with specific number.
#[derive(Clone)]
pub struct EpochTransitions {
pub number: u64,
pub candidates: Vec<EpochTransition>,
}
impl Encodable for EpochTransitions {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(2).append(&self.number).append_list(&self.candidates);
}
}
impl Decodable for EpochTransitions {
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
Ok(EpochTransitions {
number: rlp.val_at(0)?,
candidates: rlp.list_at(1)?,
})
}
}
#[derive(Debug, Clone)]
pub struct EpochTransition {
pub block_hash: H256, // block hash at which the transition occurred.
pub block_number: BlockNumber, // block number at which the tranition occurred.
pub proof: Vec<u8>, // "transition/epoch" proof from the engine.
pub state_proof: Vec<DBValue>, // state items necessary to regenerate proof.
}
impl Encodable for EpochTransition {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(4)
.append(&self.block_hash)
.append(&self.block_number)
.append(&self.proof)
.begin_list(self.state_proof.len());
for item in &self.state_proof {
s.append(&&**item);
}
}
}
impl Decodable for EpochTransition {
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
Ok(EpochTransition {
block_hash: rlp.val_at(0)?,
block_number: rlp.val_at(1)?,
proof: rlp.val_at(2)?,
state_proof: rlp.at(3)?.iter().map(|x| {
Ok(DBValue::from_slice(x.data()?))
}).collect::<Result<Vec<_>, _>>()?,
})
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use rlp::*; use rlp::*;

View File

@ -31,5 +31,6 @@ pub mod generator;
pub use self::blockchain::{BlockProvider, BlockChain}; pub use self::blockchain::{BlockProvider, BlockChain};
pub use self::cache::CacheSize; pub use self::cache::CacheSize;
pub use self::config::Config; pub use self::config::Config;
pub use self::extras::EpochTransition;
pub use types::tree_route::TreeRoute; pub use types::tree_route::TreeRoute;
pub use self::import_route::ImportRoute; pub use self::import_route::ImportRoute;

View File

@ -158,6 +158,7 @@ fn ethereum_builtin(name: &str) -> Box<Impl> {
"modexp" => Box::new(ModexpImpl) as Box<Impl>, "modexp" => Box::new(ModexpImpl) as Box<Impl>,
"bn128_add" => Box::new(Bn128AddImpl) as Box<Impl>, "bn128_add" => Box::new(Bn128AddImpl) as Box<Impl>,
"bn128_mul" => Box::new(Bn128MulImpl) as Box<Impl>, "bn128_mul" => Box::new(Bn128MulImpl) as Box<Impl>,
"bn128_pairing" => Box::new(Bn128PairingImpl) as Box<Impl>,
_ => panic!("invalid builtin name: {}", name), _ => panic!("invalid builtin name: {}", name),
} }
} }
@ -191,6 +192,9 @@ struct Bn128AddImpl;
#[derive(Debug)] #[derive(Debug)]
struct Bn128MulImpl; struct Bn128MulImpl;
#[derive(Debug)]
struct Bn128PairingImpl;
impl Impl for Identity { impl Impl for Identity {
fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), Error> { fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), Error> {
output.write(0, input); output.write(0, input);
@ -393,11 +397,109 @@ impl Impl for Bn128MulImpl {
} }
} }
mod bn128_gen {
use bn::{AffineG1, AffineG2, Fq, Fq2, G1, G2, Gt, pairing};
lazy_static! {
pub static ref P1: G1 = G1::from(AffineG1::new(
Fq::from_str("1").expect("1 is a valid field element"),
Fq::from_str("2").expect("2 is a valid field element"),
).expect("Generator P1(1, 2) is a valid curve point"));
}
lazy_static! {
pub static ref P2: G2 = G2::from(AffineG2::new(
Fq2::new(
Fq::from_str("10857046999023057135944570762232829481370756359578518086990519993285655852781")
.expect("a valid field element"),
Fq::from_str("11559732032986387107991004021392285783925812861821192530917403151452391805634")
.expect("a valid field element"),
),
Fq2::new(
Fq::from_str("8495653923123431417604973247489272438418190587263600148770280649306958101930")
.expect("a valid field element"),
Fq::from_str("4082367875863433681332203403145435568316851327593401208105741076214120093531")
.expect("a valid field element"),
),
).expect("the generator P2(10857046999023057135944570762232829481370756359578518086990519993285655852781 + 11559732032986387107991004021392285783925812861821192530917403151452391805634i, 8495653923123431417604973247489272438418190587263600148770280649306958101930 + 4082367875863433681332203403145435568316851327593401208105741076214120093531i) is a valid curve point"));
}
lazy_static! {
pub static ref P1_P2_PAIRING: Gt = pairing(P1.clone(), P2.clone());
}
}
impl Impl for Bn128PairingImpl {
/// Can fail if:
/// - input length is not a multiple of 192
/// - any of odd points does not belong to bn128 curve
/// - any of even points does not belong to the twisted bn128 curve over the field F_p^2 = F_p[i] / (i^2 + 1)
fn execute(&self, input: &[u8], output: &mut BytesRef) -> Result<(), Error> {
use bn::{AffineG1, AffineG2, Fq, Fq2, pairing, G1, G2, Gt};
let elements = input.len() / 192; // (a, b_a, b_b - each 64-byte affine coordinates)
if input.len() % 192 != 0 {
return Err("Invalid input length, must be multiple of 192 (3 * (32*2))".into())
}
let ret_val = if input.len() == 0 {
U256::one()
} else {
let mut vals = Vec::new();
for idx in 0..elements {
let a_x = Fq::from_slice(&input[idx*192..idx*192+32])
.map_err(|_| Error::from("Invalid a argument x coordinate"))?;
let a_y = Fq::from_slice(&input[idx*192+32..idx*192+64])
.map_err(|_| Error::from("Invalid a argument y coordinate"))?;
let b_b_x = Fq::from_slice(&input[idx*192+64..idx*192+96])
.map_err(|_| Error::from("Invalid b argument imaginary coeff x coordinate"))?;
let b_b_y = Fq::from_slice(&input[idx*192+96..idx*192+128])
.map_err(|_| Error::from("Invalid b argument imaginary coeff y coordinate"))?;
let b_a_x = Fq::from_slice(&input[idx*192+128..idx*192+160])
.map_err(|_| Error::from("Invalid b argument real coeff x coordinate"))?;
let b_a_y = Fq::from_slice(&input[idx*192+160..idx*192+192])
.map_err(|_| Error::from("Invalid b argument real coeff y coordinate"))?;
vals.push((
G1::from(
AffineG1::new(a_x, a_y).map_err(|_| Error::from("Invalid a argument - not on curve"))?
),
G2::from(
AffineG2::new(
Fq2::new(b_a_x, b_a_y),
Fq2::new(b_b_x, b_b_y),
).map_err(|_| Error::from("Invalid b argument - not on curve"))?
),
));
};
let mul = vals.into_iter().fold(Gt::one(), |s, (a, b)| s * pairing(a, b));
if mul == *bn128_gen::P1_P2_PAIRING {
U256::one()
} else {
U256::zero()
}
};
let mut buf = [0u8; 32];
ret_val.to_big_endian(&mut buf);
output.write(0, &buf);
Ok(())
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{Builtin, Linear, ethereum_builtin, Pricer, Modexp}; use super::{Builtin, Linear, ethereum_builtin, Pricer, Modexp};
use ethjson; use ethjson;
use util::{U256, BytesRef}; use util::{U256, BytesRef};
use rustc_serialize::hex::FromHex;
#[test] #[test]
fn identity() { fn identity() {
@ -714,6 +816,81 @@ mod tests {
} }
} }
fn builtin_pairing() -> Builtin {
Builtin {
pricer: Box::new(Linear { base: 0, word: 0 }),
native: ethereum_builtin("bn128_pairing"),
activate_at: 0,
}
}
fn empty_test(f: Builtin, expected: Vec<u8>) {
let mut empty = [0u8; 0];
let input = BytesRef::Fixed(&mut empty);
let mut output = vec![0u8; expected.len()];
f.execute(&input[..], &mut BytesRef::Fixed(&mut output[..])).expect("Builtin should not fail");
assert_eq!(output, expected);
}
fn error_test(f: Builtin, input: &[u8], msg_contains: Option<&str>) {
let mut output = vec![0u8; 64];
let res = f.execute(input, &mut BytesRef::Fixed(&mut output[..]));
if let Some(msg) = msg_contains {
if let Err(e) = res {
if !e.0.contains(msg) {
panic!("There should be error containing '{}' here, but got: '{}'", msg, e.0);
}
}
} else {
assert!(res.is_err(), "There should be built-in error here");
}
}
fn bytes(s: &'static str) -> Vec<u8> {
FromHex::from_hex(s).expect("static str should contain valid hex bytes")
}
#[test]
fn bn128_pairing_empty() {
// should not fail, because empty input is a valid input of 0 elements
empty_test(
builtin_pairing(),
bytes("0000000000000000000000000000000000000000000000000000000000000001"),
);
}
#[test]
fn bn128_pairing_notcurve() {
// should fail - point not on curve
error_test(
builtin_pairing(),
&bytes("\
1111111111111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111111111111111111111111111"
),
Some("not on curve"),
);
}
#[test]
fn bn128_pairing_fragmented() {
// should fail - input length is invalid
error_test(
builtin_pairing(),
&bytes("\
1111111111111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111111111111111111111111111\
111111111111111111111111111111"
),
Some("Invalid input length"),
);
}
#[test] #[test]
#[should_panic] #[should_panic]

View File

@ -32,13 +32,13 @@ use util::kvdb::*;
// other // other
use basic_types::Seal; use basic_types::Seal;
use block::*; use block::*;
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use blockchain::{BlockChain, BlockProvider, EpochTransition, TreeRoute, ImportRoute};
use blockchain::extras::TransactionAddress; use blockchain::extras::TransactionAddress;
use client::Error as ClientError; use client::Error as ClientError;
use client::{ use client::{
BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient,
MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode, MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
ChainNotify, PruningInfo, ChainNotify, PruningInfo, ProvingBlockChainClient,
}; };
use encoded; use encoded;
use engines::Engine; use engines::Engine;
@ -49,7 +49,7 @@ use evm::{Factory as EvmFactory, Schedule};
use executive::{Executive, Executed, TransactOptions, contract_address}; use executive::{Executive, Executed, TransactOptions, contract_address};
use factory::Factories; use factory::Factories;
use futures::{future, Future}; use futures::{future, Future};
use header::BlockNumber; use header::{BlockNumber, Header};
use io::*; use io::*;
use log_entry::LocalizedLogEntry; use log_entry::LocalizedLogEntry;
use miner::{Miner, MinerService, TransactionImportResult}; use miner::{Miner, MinerService, TransactionImportResult};
@ -166,8 +166,7 @@ impl Client {
db: Arc<KeyValueDB>, db: Arc<KeyValueDB>,
miner: Arc<Miner>, miner: Arc<Miner>,
message_channel: IoChannel<ClientIoMessage>, message_channel: IoChannel<ClientIoMessage>,
) -> Result<Arc<Client>, ClientError> { ) -> Result<Arc<Client>, ::error::Error> {
let trie_spec = match config.fat_db { let trie_spec = match config.fat_db {
true => TrieSpec::Fat, true => TrieSpec::Fat,
false => TrieSpec::Secure, false => TrieSpec::Secure,
@ -247,17 +246,27 @@ impl Client {
exit_handler: Mutex::new(None), exit_handler: Mutex::new(None),
}); });
// prune old states.
{ {
let state_db = client.state_db.lock().boxed_clone(); let state_db = client.state_db.lock().boxed_clone();
let chain = client.chain.read(); let chain = client.chain.read();
client.prune_ancient(state_db, &chain)?; client.prune_ancient(state_db, &chain)?;
} }
// ensure genesis epoch proof in the DB.
{
let chain = client.chain.read();
client.generate_epoch_proof(&spec.genesis_header(), 0, &*chain);
}
if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) { if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) {
trace!(target: "client", "Found registrar at {}", reg_addr); trace!(target: "client", "Found registrar at {}", reg_addr);
let registrar = Registry::new(reg_addr); let registrar = Registry::new(reg_addr);
*client.registrar.lock() = Some(registrar); *client.registrar.lock() = Some(registrar);
} }
// ensure buffered changes are flushed.
client.db.read().flush().map_err(ClientError::Database)?;
Ok(client) Ok(client)
} }
@ -368,7 +377,7 @@ impl Client {
let chain = self.chain.read(); let chain = self.chain.read();
// Check the block isn't so old we won't be able to enact it. // Check the block isn't so old we won't be able to enact it.
let best_block_number = chain.best_block_number(); let best_block_number = chain.best_block_number();
if best_block_number >= self.history && header.number() <= best_block_number - self.history { if self.pruning_info().earliest_state > header.number() {
warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
return Err(()); return Err(());
} }
@ -380,6 +389,12 @@ impl Client {
return Err(()); return Err(());
}; };
let verify_external_result = self.verifier.verify_block_external(header, &block.bytes, engine);
if let Err(e) = verify_external_result {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(());
};
// Check if Parent is in chain // Check if Parent is in chain
let chain_has_parent = chain.block_header(header.parent_hash()); let chain_has_parent = chain.block_header(header.parent_hash());
if let Some(parent) = chain_has_parent { if let Some(parent) = chain_has_parent {
@ -398,7 +413,7 @@ impl Client {
// Final Verification // Final Verification
if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); warn!(target: "client", "Stage 5 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(()); return Err(());
} }
@ -569,6 +584,22 @@ impl Client {
//let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); //let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));
let mut batch = DBTransaction::new(); let mut batch = DBTransaction::new();
// generate validation proof if the engine requires them.
// TODO: make conditional?
let entering_new_epoch = {
use engines::EpochChange;
match self.engine.is_epoch_end(block.header(), Some(block_data), Some(&receipts)) {
EpochChange::Yes(e, _) => Some((block.header().clone(), e)),
EpochChange::No => None,
EpochChange::Unsure(_) => {
warn!(target: "client", "Detected invalid engine implementation.");
warn!(target: "client", "Engine claims to require more block data, but everything provided.");
None
}
}
};
// CHECK! I *think* this is fine, even if the state_root is equal to another // CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number. // already-imported block of the same number.
// TODO: Prove it with a test. // TODO: Prove it with a test.
@ -576,6 +607,7 @@ impl Client {
state.journal_under(&mut batch, number, hash).expect("DB commit failed"); state.journal_under(&mut batch, number, hash).expect("DB commit failed");
let route = chain.insert_block(&mut batch, block_data, receipts); let route = chain.insert_block(&mut batch, block_data, receipts);
self.tracedb.read().import(&mut batch, TraceImportRequest { self.tracedb.read().import(&mut batch, TraceImportRequest {
traces: traces.into(), traces: traces.into(),
block_hash: hash.clone(), block_hash: hash.clone(),
@ -595,9 +627,58 @@ impl Client {
warn!("Failed to prune ancient state data: {}", e); warn!("Failed to prune ancient state data: {}", e);
} }
if let Some((header, epoch)) = entering_new_epoch {
self.generate_epoch_proof(&header, epoch, &chain);
}
route route
} }
// generate an epoch transition proof at the given block, and write it into the given blockchain.
fn generate_epoch_proof(&self, header: &Header, epoch_number: u64, chain: &BlockChain) {
use std::cell::RefCell;
use std::collections::BTreeSet;
let mut batch = DBTransaction::new();
let hash = header.hash();
debug!(target: "client", "Generating validation proof for block {}", hash);
// proof is two-part. state items read in lexicographical order,
// and the secondary "proof" part.
let read_values = RefCell::new(BTreeSet::new());
let block_id = BlockId::Hash(hash.clone());
let proof = {
let call = |a, d| {
let tx = self.contract_call_tx(block_id, a, d);
let (result, items) = self.prove_transaction(tx, block_id)
.ok_or_else(|| format!("Unable to make call to generate epoch proof."))?;
read_values.borrow_mut().extend(items);
Ok(result)
};
self.engine.epoch_proof(&header, &call)
};
// insert into database, using the generated proof.
match proof {
Ok(proof) => {
chain.insert_epoch_transition(&mut batch, epoch_number, EpochTransition {
block_hash: hash.clone(),
block_number: header.number(),
proof: proof,
state_proof: read_values.into_inner().into_iter().collect(),
});
self.db.read().write_buffered(batch);
}
Err(e) => {
warn!(target: "client", "Error generating epoch change proof for block {}: {}", hash, e);
warn!(target: "client", "Snapshots generated by this node will be incomplete.");
}
}
}
// prune ancient states until below the memory limit or only the minimum amount remain. // prune ancient states until below the memory limit or only the minimum amount remain.
fn prune_ancient(&self, mut state_db: StateDB, chain: &BlockChain) -> Result<(), ClientError> { fn prune_ancient(&self, mut state_db: StateDB, chain: &BlockChain) -> Result<(), ClientError> {
let number = match state_db.journal_db().latest_era() { let number = match state_db.journal_db().latest_era() {
@ -689,7 +770,7 @@ impl Client {
let db = self.state_db.lock().boxed_clone(); let db = self.state_db.lock().boxed_clone();
// early exit for pruned blocks // early exit for pruned blocks
if db.is_pruned() && self.chain.read().best_block_number() >= block_number + self.history { if db.is_pruned() && self.pruning_info().earliest_state > block_number {
return None; return None;
} }
@ -790,7 +871,7 @@ impl Client {
let best_block_number = self.chain_info().best_block_number; let best_block_number = self.chain_info().best_block_number;
let block_number = self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))?; let block_number = self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))?;
if best_block_number > self.history + block_number && db.is_pruned() { if db.is_pruned() && self.pruning_info().earliest_state > block_number {
return Err(snapshot::Error::OldBlockPrunedDB.into()); return Err(snapshot::Error::OldBlockPrunedDB.into());
} }
@ -814,7 +895,7 @@ impl Client {
}, },
}; };
snapshot::take_snapshot(&self.chain.read(), start_hash, db.as_hashdb(), writer, p)?; snapshot::take_snapshot(&*self.engine, &self.chain.read(), start_hash, db.as_hashdb(), writer, p)?;
Ok(()) Ok(())
} }
@ -865,6 +946,20 @@ impl Client {
} }
} }
} }
// transaction for calling contracts from services like engine.
// from the null sender, with 50M gas.
fn contract_call_tx(&self, block_id: BlockId, address: Address, data: Bytes) -> SignedTransaction {
let from = Address::default();
Transaction {
nonce: self.nonce(&from, block_id).unwrap_or_else(|| self.engine.account_start_nonce()),
action: Action::Call(address),
gas: U256::from(50_000_000),
gas_price: U256::default(),
value: U256::default(),
data: data,
}.fake_sign(from)
}
} }
impl snapshot::DatabaseRestore for Client { impl snapshot::DatabaseRestore for Client {
@ -960,7 +1055,7 @@ impl BlockChainClient for Client {
return Err(err.into()) return Err(err.into())
} }
} }
let lower = t.gas_required(&self.engine.schedule(&env_info)).into(); let lower = t.gas_required(&self.engine.schedule(env_info.number)).into();
if cond(lower)? { if cond(lower)? {
trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower); trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower);
return Ok(lower) return Ok(lower)
@ -1259,7 +1354,7 @@ impl BlockChainClient for Client {
.collect(); .collect();
match (transaction, previous_receipts) { match (transaction, previous_receipts) {
(Some(transaction), Some(previous_receipts)) => { (Some(transaction), Some(previous_receipts)) => {
Some(transaction_receipt(transaction, previous_receipts)) Some(transaction_receipt(self.engine(), transaction, previous_receipts))
}, },
_ => None, _ => None,
} }
@ -1269,7 +1364,7 @@ impl BlockChainClient for Client {
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> { fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
let chain = self.chain.read(); let chain = self.chain.read();
match chain.is_known(from) && chain.is_known(to) { match chain.is_known(from) && chain.is_known(to) {
true => Some(chain.tree_route(from.clone(), to.clone())), true => chain.tree_route(from.clone(), to.clone()),
false => None false => None
} }
} }
@ -1455,15 +1550,7 @@ impl BlockChainClient for Client {
} }
fn call_contract(&self, block_id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String> { fn call_contract(&self, block_id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String> {
let from = Address::default(); let transaction = self.contract_call_tx(block_id, address, data);
let transaction = Transaction {
nonce: self.latest_nonce(&from),
action: Action::Call(address),
gas: U256::from(50_000_000),
gas_price: U256::default(),
value: U256::default(),
data: data,
}.fake_sign(from);
self.call(&transaction, block_id, Default::default()) self.call(&transaction, block_id, Default::default())
.map_err(|e| format!("{:?}", e)) .map_err(|e| format!("{:?}", e))
@ -1501,11 +1588,15 @@ impl BlockChainClient for Client {
}) })
.and_then(|a| if a.is_zero() { None } else { Some(a) }) .and_then(|a| if a.is_zero() { None } else { Some(a) })
} }
fn eip86_transition(&self) -> u64 {
self.engine().params().eip86_transition
}
} }
impl MiningBlockChainClient for Client { impl MiningBlockChainClient for Client {
fn latest_schedule(&self) -> Schedule { fn latest_schedule(&self) -> Schedule {
self.engine.schedule(&self.latest_env_info()) self.engine.schedule(self.latest_env_info().number)
} }
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
@ -1615,7 +1706,7 @@ impl MayPanic for Client {
} }
} }
impl ::client::ProvingBlockChainClient for Client { impl ProvingBlockChainClient for Client {
fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec<Bytes>, H256)> { fn prove_storage(&self, key1: H256, key2: H256, id: BlockId) -> Option<(Vec<Bytes>, H256)> {
self.state_at(id) self.state_at(id)
.and_then(move |state| state.prove_storage(key1, key2).ok()) .and_then(move |state| state.prove_storage(key1, key2).ok())
@ -1626,7 +1717,7 @@ impl ::client::ProvingBlockChainClient for Client {
.and_then(move |state| state.prove_account(key1).ok()) .and_then(move |state| state.prove_account(key1).ok())
} }
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<Vec<DBValue>> { fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)> {
let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) { let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) {
(Some(s), Some(e)) => (s, e), (Some(s), Some(e)) => (s, e),
_ => return None, _ => return None,
@ -1641,8 +1732,9 @@ impl ::client::ProvingBlockChainClient for Client {
let res = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&transaction, options); let res = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&transaction, options);
match res { match res {
Err(ExecutionError::Internal(_)) => return None, Err(ExecutionError::Internal(_)) => None,
_ => return Some(state.drop().1.extract_proof()), Err(_) => Some((Vec::new(), state.drop().1.extract_proof())),
Ok(res) => Some((res.output, state.drop().1.extract_proof())),
} }
} }
} }
@ -1655,7 +1747,7 @@ impl Drop for Client {
/// Returns `LocalizedReceipt` given `LocalizedTransaction` /// Returns `LocalizedReceipt` given `LocalizedTransaction`
/// and a vector of receipts from given block up to transaction index. /// and a vector of receipts from given block up to transaction index.
fn transaction_receipt(mut tx: LocalizedTransaction, mut receipts: Vec<Receipt>) -> LocalizedReceipt { fn transaction_receipt(engine: &Engine, mut tx: LocalizedTransaction, mut receipts: Vec<Receipt>) -> LocalizedReceipt {
assert_eq!(receipts.len(), tx.transaction_index + 1, "All previous receipts are provided."); assert_eq!(receipts.len(), tx.transaction_index + 1, "All previous receipts are provided.");
let sender = tx.sender(); let sender = tx.sender();
@ -1674,12 +1766,12 @@ fn transaction_receipt(mut tx: LocalizedTransaction, mut receipts: Vec<Receipt>)
transaction_hash: transaction_hash, transaction_hash: transaction_hash,
transaction_index: transaction_index, transaction_index: transaction_index,
block_hash: block_hash, block_hash: block_hash,
block_number:block_number, block_number: block_number,
cumulative_gas_used: receipt.gas_used, cumulative_gas_used: receipt.gas_used,
gas_used: receipt.gas_used - prior_gas_used, gas_used: receipt.gas_used - prior_gas_used,
contract_address: match tx.action { contract_address: match tx.action {
Action::Call(_) => None, Action::Call(_) => None,
Action::Create => Some(contract_address(&sender, &tx.nonce)) Action::Create => Some(contract_address(engine.create_address_scheme(block_number), &sender, &tx.nonce, &tx.data.sha3()))
}, },
logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry { logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry {
entry: log, entry: log,
@ -1717,7 +1809,7 @@ mod tests {
// Separate thread uncommited transaction // Separate thread uncommited transaction
let go = Arc::new(AtomicBool::new(false)); let go = Arc::new(AtomicBool::new(false));
let go_thread = go.clone(); let go_thread = go.clone();
let another_client = client.reference().clone(); let another_client = client.clone();
thread::spawn(move || { thread::spawn(move || {
let mut batch = DBTransaction::new(); let mut batch = DBTransaction::new();
another_client.chain.read().insert_block(&mut batch, &new_block, Vec::new()); another_client.chain.read().insert_block(&mut batch, &new_block, Vec::new());
@ -1739,10 +1831,12 @@ mod tests {
use receipt::{Receipt, LocalizedReceipt}; use receipt::{Receipt, LocalizedReceipt};
use transaction::{Transaction, LocalizedTransaction, Action}; use transaction::{Transaction, LocalizedTransaction, Action};
use util::Hashable; use util::Hashable;
use tests::helpers::TestEngine;
// given // given
let key = KeyPair::from_secret_slice(&"test".sha3()).unwrap(); let key = KeyPair::from_secret_slice(&"test".sha3()).unwrap();
let secret = key.secret(); let secret = key.secret();
let engine = TestEngine::new(0);
let block_number = 1; let block_number = 1;
let block_hash = 5.into(); let block_hash = 5.into();
@ -1786,7 +1880,7 @@ mod tests {
}]; }];
// when // when
let receipt = transaction_receipt(transaction, receipts); let receipt = transaction_receipt(&engine, transaction, receipts);
// then // then
assert_eq!(receipt, LocalizedReceipt { assert_eq!(receipt, LocalizedReceipt {

View File

@ -353,7 +353,7 @@ pub fn get_temp_state_db() -> GuardedTempResult<StateDB> {
impl MiningBlockChainClient for TestBlockChainClient { impl MiningBlockChainClient for TestBlockChainClient {
fn latest_schedule(&self) -> Schedule { fn latest_schedule(&self) -> Schedule {
Schedule::new_post_eip150(24576, true, true, true) Schedule::new_post_eip150(24576, true, true, true, true)
} }
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock { fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
@ -756,6 +756,8 @@ impl BlockChainClient for TestBlockChainClient {
fn registrar_address(&self) -> Option<Address> { None } fn registrar_address(&self) -> Option<Address> { None }
fn registry_address(&self, _name: String) -> Option<Address> { None } fn registry_address(&self, _name: String) -> Option<Address> { None }
fn eip86_transition(&self) -> u64 { u64::max_value() }
} }
impl ProvingBlockChainClient for TestBlockChainClient { impl ProvingBlockChainClient for TestBlockChainClient {
@ -767,7 +769,7 @@ impl ProvingBlockChainClient for TestBlockChainClient {
None None
} }
fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<Vec<DBValue>> { fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec<DBValue>)> {
None None
} }
} }

View File

@ -272,6 +272,9 @@ pub trait BlockChainClient : Sync + Send {
/// Get the address of a particular blockchain service, if available. /// Get the address of a particular blockchain service, if available.
fn registry_address(&self, name: String) -> Option<Address>; fn registry_address(&self, name: String) -> Option<Address>;
/// Get the EIP-86 transition block number.
fn eip86_transition(&self) -> u64;
} }
impl IpcConfig for BlockChainClient { } impl IpcConfig for BlockChainClient { }
@ -324,5 +327,7 @@ pub trait ProvingBlockChainClient: BlockChainClient {
fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec<Bytes>, BasicAccount)>; fn prove_account(&self, key1: H256, id: BlockId) -> Option<(Vec<Bytes>, BasicAccount)>;
/// Prove execution of a transaction at the given block. /// Prove execution of a transaction at the given block.
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<Vec<DBValue>>; /// Returns the output of the call and a vector of database items necessary
/// to reproduce it.
fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)>;
} }

View File

@ -25,19 +25,18 @@ use rlp::{UntrustedRlp, encode};
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::*; use block::*;
use spec::CommonParams; use spec::CommonParams;
use engines::{Engine, Seal, EngineError}; use engines::{Call, Engine, Seal, EngineError};
use header::Header; use header::{Header, BlockNumber};
use error::{Error, TransactionError, BlockError}; use error::{Error, TransactionError, BlockError};
use evm::Schedule; use evm::Schedule;
use ethjson; use ethjson;
use io::{IoContext, IoHandler, TimerToken, IoService}; use io::{IoContext, IoHandler, TimerToken, IoService};
use env_info::EnvInfo;
use builtin::Builtin; use builtin::Builtin;
use transaction::UnverifiedTransaction; use transaction::UnverifiedTransaction;
use client::{Client, EngineClient}; use client::{Client, EngineClient};
use state::CleanupMode; use state::CleanupMode;
use super::signer::EngineSigner; use super::signer::EngineSigner;
use super::validator_set::{ValidatorSet, new_validator_set}; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set};
/// `AuthorityRound` params. /// `AuthorityRound` params.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -75,27 +74,78 @@ impl From<ethjson::spec::AuthorityRoundParams> for AuthorityRoundParams {
} }
} }
/// Engine using `AuthorityRound` proof-of-work consensus algorithm, suitable for Ethereum // Helper for managing the step.
/// mainnet chains in the Olympic, Frontier and Homestead eras. #[derive(Debug)]
struct Step {
calibrate: bool, // whether calibration is enabled.
inner: AtomicUsize,
duration: Duration,
}
impl Step {
fn load(&self) -> usize { self.inner.load(AtomicOrdering::SeqCst) }
fn duration_remaining(&self) -> Duration {
let now = unix_now();
let step_end = self.duration * (self.load() as u32 + 1);
if step_end > now {
step_end - now
} else {
Duration::from_secs(0)
}
}
fn increment(&self) {
self.inner.fetch_add(1, AtomicOrdering::SeqCst);
}
fn calibrate(&self) {
if self.calibrate {
let new_step = unix_now().as_secs() / self.duration.as_secs();
self.inner.store(new_step as usize, AtomicOrdering::SeqCst);
}
}
fn is_future(&self, given: usize) -> bool {
if given > self.load() + 1 {
// Make absolutely sure that the given step is correct.
self.calibrate();
given > self.load() + 1
} else {
false
}
}
}
/// Engine using `AuthorityRound` proof-of-authority BFT consensus.
pub struct AuthorityRound { pub struct AuthorityRound {
params: CommonParams, params: CommonParams,
gas_limit_bound_divisor: U256, gas_limit_bound_divisor: U256,
block_reward: U256, block_reward: U256,
registrar: Address, registrar: Address,
step_duration: Duration,
builtins: BTreeMap<Address, Builtin>, builtins: BTreeMap<Address, Builtin>,
transition_service: IoService<()>, transition_service: IoService<()>,
step: AtomicUsize, step: Arc<Step>,
proposed: AtomicBool, proposed: AtomicBool,
client: RwLock<Option<Weak<EngineClient>>>, client: RwLock<Option<Weak<EngineClient>>>,
signer: EngineSigner, signer: EngineSigner,
validators: Box<ValidatorSet>, validators: Box<ValidatorSet>,
/// Is this Engine just for testing (prevents step calibration).
calibrate_step: bool,
validate_score_transition: u64, validate_score_transition: u64,
eip155_transition: u64, eip155_transition: u64,
} }
// header-chain validator.
struct EpochVerifier {
epoch_number: u64,
step: Arc<Step>,
subchain_validators: SimpleList,
}
impl super::EpochVerifier for EpochVerifier {
fn epoch_number(&self) -> u64 { self.epoch_number.clone() }
fn verify_light(&self, header: &Header) -> Result<(), Error> {
// always check the seal since it's fast.
// nothing heavier to do.
verify_external(header, &self.subchain_validators, &*self.step)
}
}
fn header_step(header: &Header) -> Result<usize, ::rlp::DecoderError> { fn header_step(header: &Header) -> Result<usize, ::rlp::DecoderError> {
UntrustedRlp::new(&header.seal().get(0).expect("was either checked with verify_block_basic or is genesis; has 2 fields; qed (Make sure the spec file has a correct genesis seal)")).as_val() UntrustedRlp::new(&header.seal().get(0).expect("was either checked with verify_block_basic or is genesis; has 2 fields; qed (Make sure the spec file has a correct genesis seal)")).as_val()
} }
@ -104,6 +154,26 @@ fn header_signature(header: &Header) -> Result<Signature, ::rlp::DecoderError> {
UntrustedRlp::new(&header.seal().get(1).expect("was checked with verify_block_basic; has 2 fields; qed")).as_val::<H520>().map(Into::into) UntrustedRlp::new(&header.seal().get(1).expect("was checked with verify_block_basic; has 2 fields; qed")).as_val::<H520>().map(Into::into)
} }
fn verify_external(header: &Header, validators: &ValidatorSet, step: &Step) -> Result<(), Error> {
let header_step = header_step(header)?;
// Give one step slack if step is lagging, double vote is still not possible.
if step.is_future(header_step) {
trace!(target: "engine", "verify_block_unordered: block from the future");
validators.report_benign(header.author(), header.number());
Err(BlockError::InvalidSeal)?
} else {
let proposer_signature = header_signature(header)?;
let correct_proposer = validators.get(header.parent_hash(), header_step);
if !verify_address(&correct_proposer, &proposer_signature, &header.bare_hash())? {
trace!(target: "engine", "verify_block_unordered: bad proposer for step: {}", header_step);
Err(EngineError::NotProposer(Mismatch { expected: correct_proposer, found: header.author().clone() }))?
} else {
Ok(())
}
}
}
trait AsMillis { trait AsMillis {
fn as_millis(&self) -> u64; fn as_millis(&self) -> u64;
} }
@ -125,15 +195,17 @@ impl AuthorityRound {
gas_limit_bound_divisor: our_params.gas_limit_bound_divisor, gas_limit_bound_divisor: our_params.gas_limit_bound_divisor,
block_reward: our_params.block_reward, block_reward: our_params.block_reward,
registrar: our_params.registrar, registrar: our_params.registrar,
step_duration: our_params.step_duration,
builtins: builtins, builtins: builtins,
transition_service: IoService::<()>::start()?, transition_service: IoService::<()>::start()?,
step: AtomicUsize::new(initial_step), step: Arc::new(Step {
inner: AtomicUsize::new(initial_step),
calibrate: our_params.start_step.is_none(),
duration: our_params.step_duration,
}),
proposed: AtomicBool::new(false), proposed: AtomicBool::new(false),
client: RwLock::new(None), client: RwLock::new(None),
signer: Default::default(), signer: Default::default(),
validators: new_validator_set(our_params.validators), validators: new_validator_set(our_params.validators),
calibrate_step: our_params.start_step.is_none(),
validate_score_transition: our_params.validate_score_transition, validate_score_transition: our_params.validate_score_transition,
eip155_transition: our_params.eip155_transition, eip155_transition: our_params.eip155_transition,
}); });
@ -145,22 +217,6 @@ impl AuthorityRound {
Ok(engine) Ok(engine)
} }
fn calibrate_step(&self) {
if self.calibrate_step {
self.step.store((unix_now().as_secs() / self.step_duration.as_secs()) as usize, AtomicOrdering::SeqCst);
}
}
fn remaining_step_duration(&self) -> Duration {
let now = unix_now();
let step_end = self.step_duration * (self.step.load(AtomicOrdering::SeqCst) as u32 + 1);
if step_end > now {
step_end - now
} else {
Duration::from_secs(0)
}
}
fn step_proposer(&self, bh: &H256, step: usize) -> Address { fn step_proposer(&self, bh: &H256, step: usize) -> Address {
self.validators.get(bh, step) self.validators.get(bh, step)
} }
@ -168,16 +224,6 @@ impl AuthorityRound {
fn is_step_proposer(&self, bh: &H256, step: usize, address: &Address) -> bool { fn is_step_proposer(&self, bh: &H256, step: usize, address: &Address) -> bool {
self.step_proposer(bh, step) == *address self.step_proposer(bh, step) == *address
} }
fn is_future_step(&self, step: usize) -> bool {
if step > self.step.load(AtomicOrdering::SeqCst) + 1 {
// Make absolutely sure that the step is correct.
self.calibrate_step();
step > self.step.load(AtomicOrdering::SeqCst) + 1
} else {
false
}
}
} }
fn unix_now() -> Duration { fn unix_now() -> Duration {
@ -193,7 +239,8 @@ const ENGINE_TIMEOUT_TOKEN: TimerToken = 23;
impl IoHandler<()> for TransitionHandler { impl IoHandler<()> for TransitionHandler {
fn initialize(&self, io: &IoContext<()>) { fn initialize(&self, io: &IoContext<()>) {
if let Some(engine) = self.engine.upgrade() { if let Some(engine) = self.engine.upgrade() {
io.register_timer_once(ENGINE_TIMEOUT_TOKEN, engine.remaining_step_duration().as_millis()) let remaining = engine.step.duration_remaining();
io.register_timer_once(ENGINE_TIMEOUT_TOKEN, remaining.as_millis())
.unwrap_or_else(|e| warn!(target: "engine", "Failed to start consensus step timer: {}.", e)) .unwrap_or_else(|e| warn!(target: "engine", "Failed to start consensus step timer: {}.", e))
} }
} }
@ -202,7 +249,8 @@ impl IoHandler<()> for TransitionHandler {
if timer == ENGINE_TIMEOUT_TOKEN { if timer == ENGINE_TIMEOUT_TOKEN {
if let Some(engine) = self.engine.upgrade() { if let Some(engine) = self.engine.upgrade() {
engine.step(); engine.step();
io.register_timer_once(ENGINE_TIMEOUT_TOKEN, engine.remaining_step_duration().as_millis()) let remaining = engine.step.duration_remaining();
io.register_timer_once(ENGINE_TIMEOUT_TOKEN, remaining.as_millis())
.unwrap_or_else(|e| warn!(target: "engine", "Failed to restart consensus step timer: {}.", e)) .unwrap_or_else(|e| warn!(target: "engine", "Failed to restart consensus step timer: {}.", e))
} }
} }
@ -224,7 +272,7 @@ impl Engine for AuthorityRound {
fn builtins(&self) -> &BTreeMap<Address, Builtin> { &self.builtins } fn builtins(&self) -> &BTreeMap<Address, Builtin> { &self.builtins }
fn step(&self) { fn step(&self) {
self.step.fetch_add(1, AtomicOrdering::SeqCst); self.step.increment();
self.proposed.store(false, AtomicOrdering::SeqCst); self.proposed.store(false, AtomicOrdering::SeqCst);
if let Some(ref weak) = *self.client.read() { if let Some(ref weak) = *self.client.read() {
if let Some(c) = weak.upgrade() { if let Some(c) = weak.upgrade() {
@ -241,13 +289,14 @@ impl Engine for AuthorityRound {
] ]
} }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule { fn schedule(&self, block_number: BlockNumber) -> Schedule {
Schedule::new_post_eip150(usize::max_value(), true, true, true) let eip86 = block_number >= self.params.eip86_transition;
Schedule::new_post_eip150(usize::max_value(), true, true, true, eip86)
} }
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) { fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) {
// Chain scoring: total weight is sqrt(U256::max_value())*height - step // Chain scoring: total weight is sqrt(U256::max_value())*height - step
let new_difficulty = U256::from(U128::max_value()) + header_step(parent).expect("Header has been verified; qed").into() - self.step.load(AtomicOrdering::SeqCst).into(); let new_difficulty = U256::from(U128::max_value()) + header_step(parent).expect("Header has been verified; qed").into() - self.step.load().into();
header.set_difficulty(new_difficulty); header.set_difficulty(new_difficulty);
header.set_gas_limit({ header.set_gas_limit({
let gas_limit = parent.gas_limit().clone(); let gas_limit = parent.gas_limit().clone();
@ -271,7 +320,7 @@ impl Engine for AuthorityRound {
fn generate_seal(&self, block: &ExecutedBlock) -> Seal { fn generate_seal(&self, block: &ExecutedBlock) -> Seal {
if self.proposed.load(AtomicOrdering::SeqCst) { return Seal::None; } if self.proposed.load(AtomicOrdering::SeqCst) { return Seal::None; }
let header = block.header(); let header = block.header();
let step = self.step.load(AtomicOrdering::SeqCst); let step = self.step.load();
if self.is_step_proposer(header.parent_hash(), step, header.author()) { if self.is_step_proposer(header.parent_hash(), step, header.author()) {
if let Ok(signature) = self.signer.sign(header.bare_hash()) { if let Ok(signature) = self.signer.sign(header.bare_hash()) {
trace!(target: "engine", "generate_seal: Issuing a block for step {}.", step); trace!(target: "engine", "generate_seal: Issuing a block for step {}.", step);
@ -319,33 +368,20 @@ impl Engine for AuthorityRound {
Ok(()) Ok(())
} }
/// Do the validator and gas limit validation. /// Do the step and gas limit validation.
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
let step = header_step(header)?; let step = header_step(header)?;
// Give one step slack if step is lagging, double vote is still not possible.
if self.is_future_step(step) {
trace!(target: "engine", "verify_block_unordered: block from the future");
self.validators.report_benign(header.author());
Err(BlockError::InvalidSeal)?
} else {
let proposer_signature = header_signature(header)?;
let correct_proposer = self.step_proposer(header.parent_hash(), step);
if !verify_address(&correct_proposer, &proposer_signature, &header.bare_hash())? {
trace!(target: "engine", "verify_block_unordered: bad proposer for step: {}", step);
Err(EngineError::NotProposer(Mismatch { expected: correct_proposer, found: header.author().clone() }))?
}
}
// Do not calculate difficulty for genesis blocks. // Do not calculate difficulty for genesis blocks.
if header.number() == 0 { if header.number() == 0 {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }))); return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
} }
// Check if parent is from a previous step. // Ensure header is from the step after parent.
let parent_step = header_step(parent)?; let parent_step = header_step(parent)?;
if step == parent_step { if step <= parent_step {
trace!(target: "engine", "Multiple blocks proposed for step {}.", step); trace!(target: "engine", "Multiple blocks proposed for step {}.", parent_step);
self.validators.report_malicious(header.author()); self.validators.report_malicious(header.author(), header.number(), Default::default());
Err(EngineError::DoubleVote(header.author().clone()))?; Err(EngineError::DoubleVote(header.author().clone()))?;
} }
@ -358,6 +394,34 @@ impl Engine for AuthorityRound {
Ok(()) Ok(())
} }
// Check the validators.
fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
verify_external(header, &*self.validators, &*self.step)
}
// the proofs we need just allow us to get the full validator set.
fn epoch_proof(&self, header: &Header, caller: &Call) -> Result<Bytes, Error> {
self.validators.epoch_proof(header, caller)
.map_err(|e| EngineError::InsufficientProof(e).into())
}
fn is_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
-> super::EpochChange
{
self.validators.is_epoch_end(header, block, receipts)
}
fn epoch_verifier(&self, header: &Header, proof: &[u8]) -> Result<Box<super::EpochVerifier>, Error> {
// extract a simple list from the proof.
let (num, simple_list) = self.validators.epoch_set(header, proof)?;
Ok(Box::new(EpochVerifier {
epoch_number: num,
step: self.step.clone(),
subchain_validators: simple_list,
}))
}
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> result::Result<(), Error> { fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> result::Result<(), Error> {
t.check_low_s()?; t.check_low_s()?;
@ -387,7 +451,6 @@ impl Engine for AuthorityRound {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use util::*; use util::*;
use env_info::EnvInfo;
use header::Header; use header::Header;
use error::{Error, BlockError}; use error::{Error, BlockError};
use ethkey::Secret; use ethkey::Secret;
@ -408,15 +471,7 @@ mod tests {
#[test] #[test]
fn can_return_schedule() { fn can_return_schedule() {
let engine = Spec::new_test_round().engine; let engine = Spec::new_test_round().engine;
let schedule = engine.schedule(&EnvInfo { let schedule = engine.schedule(10000000);
number: 10000000,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0); assert!(schedule.stack_limit > 0);
} }
@ -441,7 +496,7 @@ mod tests {
let mut header: Header = Header::default(); let mut header: Header = Header::default();
header.set_seal(vec![encode(&H520::default()).to_vec()]); header.set_seal(vec![encode(&H520::default()).to_vec()]);
let verify_result = engine.verify_block_family(&header, &Default::default(), None); let verify_result = engine.verify_block_external(&header, None);
assert!(verify_result.is_err()); assert!(verify_result.is_err());
} }
@ -454,8 +509,8 @@ mod tests {
let spec = Spec::new_test_round(); let spec = Spec::new_test_round();
let engine = &*spec.engine; let engine = &*spec.engine;
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let db1 = spec.ensure_db_good(get_temp_state_db().take(), &Default::default()).unwrap(); let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db2 = spec.ensure_db_good(get_temp_state_db().take(), &Default::default()).unwrap(); let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b1 = b1.close_and_lock(); let b1 = b1.close_and_lock();
@ -495,9 +550,11 @@ mod tests {
// Two validators. // Two validators.
// Spec starts with step 2. // Spec starts with step 2.
header.set_seal(vec![encode(&2usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); header.set_seal(vec![encode(&2usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_err()); assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
assert!(engine.verify_block_external(&header, None).is_err());
header.set_seal(vec![encode(&1usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); header.set_seal(vec![encode(&1usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
assert!(engine.verify_block_external(&header, None).is_ok());
} }
#[test] #[test]
@ -520,7 +577,33 @@ mod tests {
// Spec starts with step 2. // Spec starts with step 2.
header.set_seal(vec![encode(&1usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); header.set_seal(vec![encode(&1usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok()); assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
assert!(engine.verify_block_external(&header, None).is_ok());
header.set_seal(vec![encode(&5usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); header.set_seal(vec![encode(&5usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
assert!(engine.verify_block_external(&header, None).is_err());
}
#[test]
fn rejects_step_backwards() {
let tap = AccountProvider::transient_provider();
let addr = tap.insert_account(Secret::from_slice(&"0".sha3()).unwrap(), "0").unwrap();
let mut parent_header: Header = Header::default();
parent_header.set_seal(vec![encode(&4usize).to_vec()]);
parent_header.set_gas_limit(U256::from_str("222222").unwrap());
let mut header: Header = Header::default();
header.set_number(1);
header.set_gas_limit(U256::from_str("222222").unwrap());
header.set_author(addr);
let engine = Spec::new_test_round().engine;
let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap();
// Two validators.
// Spec starts with step 2.
header.set_seal(vec![encode(&5usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
header.set_seal(vec![encode(&3usize).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
assert!(engine.verify_block_family(&header, &parent_header, None).is_err()); assert!(engine.verify_block_family(&header, &parent_header, None).is_err());
} }
} }

View File

@ -23,15 +23,14 @@ use account_provider::AccountProvider;
use block::*; use block::*;
use builtin::Builtin; use builtin::Builtin;
use spec::CommonParams; use spec::CommonParams;
use engines::{Engine, Seal}; use engines::{Engine, EngineError, Seal, Call, EpochChange};
use env_info::EnvInfo;
use error::{BlockError, Error}; use error::{BlockError, Error};
use evm::Schedule; use evm::Schedule;
use ethjson; use ethjson;
use header::Header; use header::{Header, BlockNumber};
use client::Client; use client::Client;
use super::signer::EngineSigner; use super::signer::EngineSigner;
use super::validator_set::{ValidatorSet, new_validator_set}; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set};
/// `BasicAuthority` params. /// `BasicAuthority` params.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -51,8 +50,32 @@ impl From<ethjson::spec::BasicAuthorityParams> for BasicAuthorityParams {
} }
} }
/// Engine using `BasicAuthority` proof-of-work consensus algorithm, suitable for Ethereum struct EpochVerifier {
/// mainnet chains in the Olympic, Frontier and Homestead eras. epoch_number: u64,
list: SimpleList,
}
impl super::EpochVerifier for EpochVerifier {
fn epoch_number(&self) -> u64 { self.epoch_number.clone() }
fn verify_light(&self, header: &Header) -> Result<(), Error> {
verify_external(header, &self.list)
}
}
fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Error> {
use rlp::UntrustedRlp;
// Check if the signature belongs to a validator, can depend on parent state.
let sig = UntrustedRlp::new(&header.seal()[0]).as_val::<H520>()?;
let signer = public_to_address(&recover(&sig.into(), &header.bare_hash())?);
match validators.contains(header.parent_hash(), &signer) {
false => Err(BlockError::InvalidSeal.into()),
true => Ok(())
}
}
/// Engine using `BasicAuthority`, trivial proof-of-authority consensus.
pub struct BasicAuthority { pub struct BasicAuthority {
params: CommonParams, params: CommonParams,
gas_limit_bound_divisor: U256, gas_limit_bound_divisor: U256,
@ -86,7 +109,7 @@ impl Engine for BasicAuthority {
/// Additional engine-specific information for the user/developer concerning `header`. /// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, _header: &Header) -> BTreeMap<String, String> { map!["signature".to_owned() => "TODO".to_owned()] } fn extra_info(&self, _header: &Header) -> BTreeMap<String, String> { map!["signature".to_owned() => "TODO".to_owned()] }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule { fn schedule(&self, _block_number: BlockNumber) -> Schedule {
Schedule::new_homestead() Schedule::new_homestead()
} }
@ -138,14 +161,6 @@ impl Engine for BasicAuthority {
} }
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
use rlp::UntrustedRlp;
// Check if the signature belongs to a validator, can depend on parent state.
let sig = UntrustedRlp::new(&header.seal()[0]).as_val::<H520>()?;
let signer = public_to_address(&recover(&sig.into(), &header.bare_hash())?);
if !self.validators.contains(header.parent_hash(), &signer) {
return Err(BlockError::InvalidSeal)?;
}
// Do not calculate difficulty for genesis blocks. // Do not calculate difficulty for genesis blocks.
if header.number() == 0 { if header.number() == 0 {
return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }))); return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
@ -164,6 +179,32 @@ impl Engine for BasicAuthority {
Ok(()) Ok(())
} }
fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
verify_external(header, &*self.validators)
}
// the proofs we need just allow us to get the full validator set.
fn epoch_proof(&self, header: &Header, caller: &Call) -> Result<Bytes, Error> {
self.validators.epoch_proof(header, caller)
.map_err(|e| EngineError::InsufficientProof(e).into())
}
fn is_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
-> EpochChange
{
self.validators.is_epoch_end(header, block, receipts)
}
fn epoch_verifier(&self, header: &Header, proof: &[u8]) -> Result<Box<super::EpochVerifier>, Error> {
// extract a simple list from the proof.
let (num, simple_list) = self.validators.epoch_set(header, proof)?;
Ok(Box::new(EpochVerifier {
epoch_number: num,
list: simple_list,
}))
}
fn register_client(&self, client: Weak<Client>) { fn register_client(&self, client: Weak<Client>) {
self.validators.register_contract(client); self.validators.register_contract(client);
} }
@ -181,7 +222,6 @@ impl Engine for BasicAuthority {
mod tests { mod tests {
use util::*; use util::*;
use block::*; use block::*;
use env_info::EnvInfo;
use error::{BlockError, Error}; use error::{BlockError, Error};
use tests::helpers::*; use tests::helpers::*;
use account_provider::AccountProvider; use account_provider::AccountProvider;
@ -206,16 +246,7 @@ mod tests {
#[test] #[test]
fn can_return_schedule() { fn can_return_schedule() {
let engine = new_test_authority().engine; let engine = new_test_authority().engine;
let schedule = engine.schedule(&EnvInfo { let schedule = engine.schedule(10000000);
number: 10000000,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0); assert!(schedule.stack_limit > 0);
} }
@ -252,8 +283,7 @@ mod tests {
let engine = &*spec.engine; let engine = &*spec.engine;
engine.set_signer(Arc::new(tap), addr, "".into()); engine.set_signer(Arc::new(tap), addr, "".into());
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock(); let b = b.close_and_lock();

View File

@ -0,0 +1,45 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
// Epoch verifiers.
use error::Error;
use header::Header;
/// Verifier for all blocks within an epoch with self-contained state.
///
/// See docs on `Engine` relating to proving functions for more details.
pub trait EpochVerifier: Sync {
/// Get the epoch number.
fn epoch_number(&self) -> u64;
/// Lightly verify the next block header.
/// This may not be a header belonging to a different epoch.
fn verify_light(&self, header: &Header) -> Result<(), Error>;
/// Perform potentially heavier checks on the next block header.
fn verify_heavy(&self, header: &Header) -> Result<(), Error> {
self.verify_light(header)
}
}
/// Special "no-op" verifier for stateless, epoch-less engines.
pub struct NoOp;
impl EpochVerifier for NoOp {
fn epoch_number(&self) -> u64 { 0 }
fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) }
}

View File

@ -18,10 +18,10 @@ use std::collections::BTreeMap;
use util::{Address, HashMap}; use util::{Address, HashMap};
use builtin::Builtin; use builtin::Builtin;
use engines::{Engine, Seal}; use engines::{Engine, Seal};
use env_info::EnvInfo;
use spec::CommonParams; use spec::CommonParams;
use evm::Schedule; use evm::Schedule;
use block::ExecutedBlock; use block::ExecutedBlock;
use header::BlockNumber;
/// An engine which does not provide any consensus mechanism, just seals blocks internally. /// An engine which does not provide any consensus mechanism, just seals blocks internally.
pub struct InstantSeal { pub struct InstantSeal {
@ -58,8 +58,9 @@ impl Engine for InstantSeal {
&self.builtins &self.builtins
} }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule { fn schedule(&self, block_number: BlockNumber) -> Schedule {
Schedule::new_post_eip150(usize::max_value(), true, true, true) let eip86 = block_number >= self.params.eip86_transition;
Schedule::new_post_eip150(usize::max_value(), true, true, true, eip86)
} }
fn seals_internally(&self) -> Option<bool> { Some(true) } fn seals_internally(&self) -> Option<bool> { Some(true) }
@ -82,8 +83,7 @@ mod tests {
fn instant_can_seal() { fn instant_can_seal() {
let spec = Spec::new_instant(); let spec = Spec::new_instant();
let engine = &*spec.engine; let engine = &*spec.engine;
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap();

View File

@ -16,35 +16,42 @@
//! Consensus engine specification and basic implementations. //! Consensus engine specification and basic implementations.
mod transition;
mod vote_collector;
mod null_engine;
mod instant_seal;
mod basic_authority;
mod authority_round; mod authority_round;
mod tendermint; mod basic_authority;
mod validator_set; mod epoch_verifier;
mod instant_seal;
mod null_engine;
mod signer; mod signer;
mod tendermint;
mod transition;
mod validator_set;
mod vote_collector;
pub use self::null_engine::NullEngine;
pub use self::instant_seal::InstantSeal;
pub use self::basic_authority::BasicAuthority;
pub use self::authority_round::AuthorityRound; pub use self::authority_round::AuthorityRound;
pub use self::basic_authority::BasicAuthority;
pub use self::epoch_verifier::EpochVerifier;
pub use self::instant_seal::InstantSeal;
pub use self::null_engine::NullEngine;
pub use self::tendermint::Tendermint; pub use self::tendermint::Tendermint;
use std::sync::Weak; use std::sync::Weak;
use util::*;
use ethkey::Signature;
use account_provider::AccountProvider; use account_provider::AccountProvider;
use block::ExecutedBlock; use block::ExecutedBlock;
use builtin::Builtin; use builtin::Builtin;
use env_info::EnvInfo;
use error::{Error, TransactionError};
use spec::CommonParams;
use evm::Schedule;
use header::Header;
use transaction::{UnverifiedTransaction, SignedTransaction};
use client::Client; use client::Client;
use env_info::EnvInfo;
use error::Error;
use evm::Schedule;
use header::{Header, BlockNumber};
use receipt::Receipt;
use snapshot::SnapshotComponents;
use spec::CommonParams;
use transaction::{UnverifiedTransaction, SignedTransaction};
use evm::CreateContractAddress;
use ethkey::Signature;
use util::*;
/// Voting errors. /// Voting errors.
#[derive(Debug)] #[derive(Debug)]
@ -59,6 +66,8 @@ pub enum EngineError {
UnexpectedMessage, UnexpectedMessage,
/// Seal field has an unexpected size. /// Seal field has an unexpected size.
BadSealFieldSize(OutOfBounds<usize>), BadSealFieldSize(OutOfBounds<usize>),
/// Validation proof insufficient.
InsufficientProof(String),
} }
impl fmt::Display for EngineError { impl fmt::Display for EngineError {
@ -70,6 +79,7 @@ impl fmt::Display for EngineError {
NotAuthorized(ref address) => format!("Signer {} is not authorized.", address), NotAuthorized(ref address) => format!("Signer {} is not authorized.", address),
UnexpectedMessage => "This Engine should not be fed messages.".into(), UnexpectedMessage => "This Engine should not be fed messages.".into(),
BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob), BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob),
InsufficientProof(ref msg) => format!("Insufficient validation proof: {}", msg),
}; };
f.write_fmt(format_args!("Engine error ({})", msg)) f.write_fmt(format_args!("Engine error ({})", msg))
@ -87,6 +97,31 @@ pub enum Seal {
None, None,
} }
/// Type alias for a function we can make calls through synchronously.
pub type Call<'a> = Fn(Address, Bytes) -> Result<Bytes, String> + 'a;
/// Results of a query of whether an epoch change occurred at the given block.
#[derive(Debug, Clone, PartialEq)]
pub enum EpochChange {
/// Cannot determine until more data is passed.
Unsure(Unsure),
/// No epoch change.
No,
/// Validation proof required, and the new epoch number and expected proof.
Yes(u64, Bytes),
}
/// More data required to determine if an epoch change occurred at a given block.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Unsure {
/// Needs the body.
NeedsBody,
/// Needs the receipts.
NeedsReceipts,
/// Needs both body and receipts.
NeedsBoth,
}
/// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based. /// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based.
/// Provides hooks into each of the major parts of block import. /// Provides hooks into each of the major parts of block import.
pub trait Engine : Sync + Send { pub trait Engine : Sync + Send {
@ -107,8 +142,8 @@ pub trait Engine : Sync + Send {
/// Get the general parameters of the chain. /// Get the general parameters of the chain.
fn params(&self) -> &CommonParams; fn params(&self) -> &CommonParams;
/// Get the EVM schedule for the given `env_info`. /// Get the EVM schedule for the given `block_number`.
fn schedule(&self, env_info: &EnvInfo) -> Schedule; fn schedule(&self, block_number: BlockNumber) -> Schedule;
/// Builtin-contracts we would like to see in the chain. /// Builtin-contracts we would like to see in the chain.
/// (In principle these are just hints for the engine since that has the last word on them.) /// (In principle these are just hints for the engine since that has the last word on them.)
@ -152,18 +187,14 @@ pub trait Engine : Sync + Send {
/// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import. /// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import.
fn verify_block_family(&self, _header: &Header, _parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) } fn verify_block_family(&self, _header: &Header, _parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) }
/// Phase 4 verification. Verify block header against potentially external data.
fn verify_block_external(&self, _header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) }
/// Additional verification for transactions in blocks. /// Additional verification for transactions in blocks.
// TODO: Add flags for which bits of the transaction to check. // TODO: Add flags for which bits of the transaction to check.
// TODO: consider including State in the params. // TODO: consider including State in the params.
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, _header: &Header) -> Result<(), Error> { fn verify_transaction_basic(&self, t: &UnverifiedTransaction, _header: &Header) -> Result<(), Error> {
t.check_low_s()?; t.verify_basic(true, Some(self.params().network_id), true)?;
if let Some(n) = t.network_id() {
if n != self.params().chain_id {
return Err(TransactionError::InvalidNetworkId.into());
}
}
Ok(()) Ok(())
} }
@ -184,6 +215,40 @@ pub trait Engine : Sync + Send {
self.verify_block_basic(header, None).and_then(|_| self.verify_block_unordered(header, None)) self.verify_block_basic(header, None).and_then(|_| self.verify_block_unordered(header, None))
} }
/// Generate epoch change proof.
///
/// This will be used to generate proofs of epoch change as well as verify them.
/// Must be called on blocks that have already passed basic verification.
///
/// Return the "epoch proof" generated.
/// This must be usable to generate a `EpochVerifier` for verifying all blocks
/// from the supplied header up to the next one where proof is required.
///
/// For example, for PoA chains the proof will be a validator set,
/// and the corresponding `EpochVerifier` can be used to correctly validate
/// all blocks produced under that `ValidatorSet`
fn epoch_proof(&self, _header: &Header, _caller: &Call)
-> Result<Vec<u8>, Error>
{
Ok(Vec::new())
}
/// Whether an epoch change occurred at the given header.
/// Should not interact with state.
fn is_epoch_end(&self, _header: &Header, _block: Option<&[u8]>, _receipts: Option<&[Receipt]>)
-> EpochChange
{
EpochChange::No
}
/// Create an epoch verifier from validation proof.
///
/// The proof should be one generated by `epoch_proof`.
/// See docs of `epoch_proof` for description.
fn epoch_verifier(&self, _header: &Header, _proof: &[u8]) -> Result<Box<EpochVerifier>, Error> {
Ok(Box::new(self::epoch_verifier::NoOp))
}
/// Populate a header's fields based on its parent's header. /// Populate a header's fields based on its parent's header.
/// Usually implements the chain scoring rule based on weight. /// Usually implements the chain scoring rule based on weight.
/// The gas floor target must not be lower than the engine's minimum gas limit. /// The gas floor target must not be lower than the engine's minimum gas limit.
@ -224,4 +289,15 @@ pub trait Engine : Sync + Send {
/// Stops any services that the may hold the Engine and makes it safe to drop. /// Stops any services that the may hold the Engine and makes it safe to drop.
fn stop(&self) {} fn stop(&self) {}
/// Create a factory for building snapshot chunks and restoring from them.
/// Returning `None` indicates that this engine doesn't support snapshot creation.
fn snapshot_components(&self) -> Option<Box<SnapshotComponents>> {
None
}
/// Returns new contract address generation scheme at given block number.
fn create_address_scheme(&self, number: BlockNumber) -> CreateContractAddress {
if number >= self.params().eip86_transition { CreateContractAddress::FromCodeHash } else { CreateContractAddress::FromSenderAndNonce }
}
} }

View File

@ -20,7 +20,7 @@ use builtin::Builtin;
use engines::Engine; use engines::Engine;
use spec::CommonParams; use spec::CommonParams;
use evm::Schedule; use evm::Schedule;
use env_info::EnvInfo; use header::BlockNumber;
/// An engine which does not provide any consensus mechanism and does not seal blocks. /// An engine which does not provide any consensus mechanism and does not seal blocks.
pub struct NullEngine { pub struct NullEngine {
@ -57,7 +57,11 @@ impl Engine for NullEngine {
&self.builtins &self.builtins
} }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule { fn schedule(&self, _block_number: BlockNumber) -> Schedule {
Schedule::new_homestead() Schedule::new_homestead()
} }
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
Some(Box::new(::snapshot::PowSnapshot(10000)))
}
} }

View File

@ -30,9 +30,8 @@ use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
use util::*; use util::*;
use client::{Client, EngineClient}; use client::{Client, EngineClient};
use error::{Error, BlockError}; use error::{Error, BlockError};
use header::Header; use header::{Header, BlockNumber};
use builtin::Builtin; use builtin::Builtin;
use env_info::EnvInfo;
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use ethkey::{recover, public_to_address, Signature}; use ethkey::{recover, public_to_address, Signature};
use account_provider::AccountProvider; use account_provider::AccountProvider;
@ -405,8 +404,9 @@ impl Engine for Tendermint {
] ]
} }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule { fn schedule(&self, block_number: BlockNumber) -> Schedule {
Schedule::new_post_eip150(usize::max_value(), true, true, true) let eip86 = block_number >= self.params.eip86_transition;
Schedule::new_post_eip150(usize::max_value(), true, true, true, eip86)
} }
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) { fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) {
@ -471,8 +471,8 @@ impl Engine for Tendermint {
return Err(EngineError::NotAuthorized(sender).into()); return Err(EngineError::NotAuthorized(sender).into());
} }
self.broadcast_message(rlp.as_raw().to_vec()); self.broadcast_message(rlp.as_raw().to_vec());
if self.votes.vote(message.clone(), &sender).is_some() { if let Some(double) = self.votes.vote(message.clone(), &sender) {
self.validators.report_malicious(&sender); self.validators.report_malicious(&sender, message.vote_step.height as BlockNumber, ::rlp::encode(&double).to_vec());
return Err(EngineError::DoubleVote(sender).into()); return Err(EngineError::DoubleVote(sender).into());
} }
trace!(target: "engine", "Handling a valid {:?} from {}.", message, sender); trace!(target: "engine", "Handling a valid {:?} from {}.", message, sender);
@ -560,7 +560,7 @@ impl Engine for Tendermint {
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor; let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
self.validators.report_malicious(header.author()); self.validators.report_malicious(header.author(), header.number(), Default::default());
return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into()); return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into());
} }
@ -610,8 +610,9 @@ impl Engine for Tendermint {
trace!(target: "engine", "Propose timeout."); trace!(target: "engine", "Propose timeout.");
if self.proposal.read().is_none() { if self.proposal.read().is_none() {
// Report the proposer if no proposal was received. // Report the proposer if no proposal was received.
let current_proposer = self.view_proposer(&*self.proposal_parent.read(), self.height.load(AtomicOrdering::SeqCst), self.view.load(AtomicOrdering::SeqCst)); let height = self.height.load(AtomicOrdering::SeqCst);
self.validators.report_benign(&current_proposer); let current_proposer = self.view_proposer(&*self.proposal_parent.read(), height, self.view.load(AtomicOrdering::SeqCst));
self.validators.report_benign(&current_proposer, height as BlockNumber);
} }
Step::Prevote Step::Prevote
}, },
@ -658,7 +659,6 @@ mod tests {
use block::*; use block::*;
use error::{Error, BlockError}; use error::{Error, BlockError};
use header::Header; use header::Header;
use env_info::EnvInfo;
use ethkey::Secret; use ethkey::Secret;
use client::chain_notify::ChainNotify; use client::chain_notify::ChainNotify;
use miner::MinerService; use miner::MinerService;
@ -676,8 +676,8 @@ mod tests {
} }
fn propose_default(spec: &Spec, proposer: Address) -> (ClosedBlock, Vec<Bytes>) { fn propose_default(spec: &Spec, proposer: Address) -> (ClosedBlock, Vec<Bytes>) {
let mut db_result = get_temp_state_db(); let db = get_temp_state_db();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap(); let db = spec.ensure_db_good(db, &Default::default()).unwrap();
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(spec.engine.as_ref(), Default::default(), false, db.boxed_clone(), &genesis_header, last_hashes, proposer, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = OpenBlock::new(spec.engine.as_ref(), Default::default(), false, db.boxed_clone(), &genesis_header, last_hashes, proposer, (3141562.into(), 31415620.into()), vec![]).unwrap();
@ -740,15 +740,7 @@ mod tests {
#[test] #[test]
fn can_return_schedule() { fn can_return_schedule() {
let engine = Spec::new_test_tendermint().engine; let engine = Spec::new_test_tendermint().engine;
let schedule = engine.schedule(&EnvInfo { let schedule = engine.schedule(10000000);
number: 10000000,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0); assert!(schedule.stack_limit > 0);
} }

View File

@ -19,119 +19,96 @@
use std::sync::Weak; use std::sync::Weak;
use util::*; use util::*;
use futures::Future;
use native_contracts::ValidatorReport as Provider;
use client::{Client, BlockChainClient}; use client::{Client, BlockChainClient};
use engines::Call;
use header::{Header, BlockNumber};
use super::ValidatorSet; use super::ValidatorSet;
use super::safe_contract::ValidatorSafeContract; use super::safe_contract::ValidatorSafeContract;
/// The validator contract should have the following interface: /// A validator contract with reporting.
/// [{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"}]
pub struct ValidatorContract { pub struct ValidatorContract {
validators: ValidatorSafeContract, validators: ValidatorSafeContract,
provider: RwLock<Option<provider::Contract>>, provider: Provider,
client: RwLock<Option<Weak<Client>>>, // TODO [keorn]: remove
} }
impl ValidatorContract { impl ValidatorContract {
pub fn new(contract_address: Address) -> Self { pub fn new(contract_address: Address) -> Self {
ValidatorContract { ValidatorContract {
validators: ValidatorSafeContract::new(contract_address), validators: ValidatorSafeContract::new(contract_address),
provider: RwLock::new(None), provider: Provider::new(contract_address),
client: RwLock::new(None),
} }
} }
} }
impl ValidatorContract {
// could be `impl Trait`.
// note: dispatches transactions to network as well as execute.
// TODO [keorn]: Make more general.
fn transact(&self) -> Box<Call> {
let client = self.client.read().clone();
Box::new(move |a, d| client.as_ref()
.and_then(Weak::upgrade)
.ok_or("No client!".into())
.and_then(|c| c.transact_contract(a, d).map_err(|e| format!("Transaction import error: {}", e)))
.map(|_| Default::default()))
}
}
impl ValidatorSet for ValidatorContract { impl ValidatorSet for ValidatorContract {
fn contains(&self, bh: &H256, address: &Address) -> bool { fn default_caller(&self, id: ::ids::BlockId) -> Box<Call> {
self.validators.contains(bh, address) self.validators.default_caller(id)
} }
fn get(&self, bh: &H256, nonce: usize) -> Address { fn is_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
self.validators.get(bh, nonce) -> ::engines::EpochChange
{
self.validators.is_epoch_end(header, block, receipts)
} }
fn count(&self, bh: &H256) -> usize { fn epoch_proof(&self, header: &Header, caller: &Call) -> Result<Vec<u8>, String> {
self.validators.count(bh) self.validators.epoch_proof(header, caller)
} }
fn report_malicious(&self, address: &Address) { fn epoch_set(&self, header: &Header, proof: &[u8]) -> Result<(u64, super::SimpleList), ::error::Error> {
if let Some(ref provider) = *self.provider.read() { self.validators.epoch_set(header, proof)
match provider.report_malicious(address) { }
fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool {
self.validators.contains_with_caller(bh, address, caller)
}
fn get_with_caller(&self, bh: &H256, nonce: usize, caller: &Call) -> Address {
self.validators.get_with_caller(bh, nonce, caller)
}
fn count_with_caller(&self, bh: &H256, caller: &Call) -> usize {
self.validators.count_with_caller(bh, caller)
}
fn report_malicious(&self, address: &Address, block: BlockNumber, proof: Bytes) {
match self.provider.report_malicious(&*self.transact(), *address, block.into(), proof).wait() {
Ok(_) => warn!(target: "engine", "Reported malicious validator {}", address), Ok(_) => warn!(target: "engine", "Reported malicious validator {}", address),
Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s), Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s),
} }
} else {
warn!(target: "engine", "Malicious behaviour could not be reported: no provider contract.")
}
} }
fn report_benign(&self, address: &Address) { fn report_benign(&self, address: &Address, block: BlockNumber) {
if let Some(ref provider) = *self.provider.read() { match self.provider.report_benign(&*self.transact(), *address, block.into()).wait() {
match provider.report_benign(address) {
Ok(_) => warn!(target: "engine", "Reported benign validator misbehaviour {}", address), Ok(_) => warn!(target: "engine", "Reported benign validator misbehaviour {}", address),
Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s), Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s),
} }
} else {
warn!(target: "engine", "Benign misbehaviour could not be reported: no provider contract.")
}
} }
fn register_contract(&self, client: Weak<Client>) { fn register_contract(&self, client: Weak<Client>) {
self.validators.register_contract(client.clone()); self.validators.register_contract(client.clone());
let transact = move |a, d| client *self.client.write() = Some(client);
.upgrade()
.ok_or("No client!".into())
.and_then(|c| c.transact_contract(a, d).map_err(|e| format!("Transaction import error: {}", e)))
.map(|_| Default::default());
*self.provider.write() = Some(provider::Contract::new(self.validators.address, transact));
}
}
mod provider {
// Autogenerated from JSON contract definition using Rust contract convertor.
#![allow(unused_imports)]
use std::string::String;
use std::result::Result;
use std::fmt;
use {util, ethabi};
use util::{Uint};
pub struct Contract {
contract: ethabi::Contract,
address: util::Address,
do_call: Box<Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send + Sync + 'static>,
}
impl Contract {
pub fn new<F>(address: util::Address, do_call: F) -> Self where F: Fn(util::Address, Vec<u8>) -> Result<Vec<u8>, String> + Send + Sync + 'static {
Contract {
contract: ethabi::Contract::new(ethabi::Interface::load(b"[{\"constant\":false,\"inputs\":[{\"name\":\"validator\",\"type\":\"address\"}],\"name\":\"reportMalicious\",\"outputs\":[],\"payable\":false,\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"validator\",\"type\":\"address\"}],\"name\":\"reportBenign\",\"outputs\":[],\"payable\":false,\"type\":\"function\"}]").expect("JSON is autogenerated; qed")),
address: address,
do_call: Box::new(do_call),
}
}
fn as_string<T: fmt::Debug>(e: T) -> String { format!("{:?}", e) }
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"validator","type":"address"}],"name":"reportMalicious","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn report_malicious(&self, validator: &util::Address) -> Result<(), String> {
let call = self.contract.function("reportMalicious".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(validator.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
/// Auto-generated from: `{"constant":false,"inputs":[{"name":"validator","type":"address"}],"name":"reportBenign","outputs":[],"payable":false,"type":"function"}`
#[allow(dead_code)]
pub fn report_benign(&self, validator: &util::Address) -> Result<(), String> {
let call = self.contract.function("reportBenign".into()).map_err(Self::as_string)?;
let data = call.encode_call(
vec![ethabi::Token::Address(validator.clone().0)]
).map_err(Self::as_string)?;
call.decode_output((self.do_call)(self.address.clone(), data)?).map_err(Self::as_string)?;
Ok(())
}
} }
} }
@ -180,7 +157,7 @@ mod tests {
header.set_parent_hash(client.chain_info().best_block_hash); header.set_parent_hash(client.chain_info().best_block_hash);
// `reportBenign` when the designated proposer releases block from the future (bad clock). // `reportBenign` when the designated proposer releases block from the future (bad clock).
assert!(client.engine().verify_block_family(&header, &header, None).is_err()); assert!(client.engine().verify_block_external(&header, None).is_err());
// Seal a block. // Seal a block.
client.engine().step(); client.engine().step();
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);

View File

@ -22,14 +22,19 @@ mod contract;
mod multi; mod multi;
use std::sync::Weak; use std::sync::Weak;
use util::{Address, H256}; use ids::BlockId;
use util::{Bytes, Address, H256};
use ethjson::spec::ValidatorSet as ValidatorSpec; use ethjson::spec::ValidatorSet as ValidatorSpec;
use client::Client; use client::Client;
use self::simple_list::SimpleList; use header::{Header, BlockNumber};
pub use self::simple_list::SimpleList;
use self::contract::ValidatorContract; use self::contract::ValidatorContract;
use self::safe_contract::ValidatorSafeContract; use self::safe_contract::ValidatorSafeContract;
use self::multi::Multi; use self::multi::Multi;
use super::Call;
/// Creates a validator set from spec. /// Creates a validator set from spec.
pub fn new_validator_set(spec: ValidatorSpec) -> Box<ValidatorSet> { pub fn new_validator_set(spec: ValidatorSpec) -> Box<ValidatorSet> {
match spec { match spec {
@ -42,17 +47,73 @@ pub fn new_validator_set(spec: ValidatorSpec) -> Box<ValidatorSet> {
} }
} }
/// A validator set.
pub trait ValidatorSet: Send + Sync { pub trait ValidatorSet: Send + Sync {
/// Checks if a given address is a validator. /// Get the default "Call" helper, for use in general operation.
fn contains(&self, parent_block_hash: &H256, address: &Address) -> bool; // TODO [keorn]: this is a hack intended to migrate off of
// a strict dependency on state always being available.
fn default_caller(&self, block_id: BlockId) -> Box<Call>;
/// Checks if a given address is a validator,
/// using underlying, default call mechanism.
fn contains(&self, parent: &H256, address: &Address) -> bool {
let default = self.default_caller(BlockId::Hash(*parent));
self.contains_with_caller(parent, address, &*default)
}
/// Draws an validator nonce modulo number of validators. /// Draws an validator nonce modulo number of validators.
fn get(&self, parent_block_hash: &H256, nonce: usize) -> Address; fn get(&self, parent: &H256, nonce: usize) -> Address {
let default = self.default_caller(BlockId::Hash(*parent));
self.get_with_caller(parent, nonce, &*default)
}
/// Returns the current number of validators. /// Returns the current number of validators.
fn count(&self, parent_block_hash: &H256) -> usize; fn count(&self, parent: &H256) -> usize {
let default = self.default_caller(BlockId::Hash(*parent));
self.count_with_caller(parent, &*default)
}
/// Whether this block is the last one in its epoch.
/// Usually indicates that the validator set changed at the given block.
///
/// Should not inspect state! This is used in situations where
/// state is not generally available.
///
/// Return `Yes` or `No` indicating whether it changed at the given header,
/// or `Unsure` indicating a need for more information.
///
/// If block or receipts are provided, do not return `Unsure` indicating
/// need for them.
fn is_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
-> super::EpochChange;
/// Generate epoch proof.
/// Must interact with state only through the given caller!
/// Otherwise, generated proofs may be wrong.
fn epoch_proof(&self, header: &Header, caller: &Call) -> Result<Vec<u8>, String>;
/// Recover the validator set for all
///
/// May fail if the given header doesn't kick off an epoch or
/// the proof is invalid.
///
/// Returns the epoch number and proof.
fn epoch_set(&self, header: &Header, proof: &[u8]) -> Result<(u64, SimpleList), ::error::Error>;
/// Checks if a given address is a validator, with the given function
/// for executing synchronous calls to contracts.
fn contains_with_caller(&self, parent_block_hash: &H256, address: &Address, caller: &Call) -> bool;
/// Draws an validator nonce modulo number of validators.
///
fn get_with_caller(&self, parent_block_hash: &H256, nonce: usize, caller: &Call) -> Address;
/// Returns the current number of validators.
fn count_with_caller(&self, parent_block_hash: &H256, caller: &Call) -> usize;
/// Notifies about malicious behaviour. /// Notifies about malicious behaviour.
fn report_malicious(&self, _validator: &Address) {} fn report_malicious(&self, _validator: &Address, _block: BlockNumber, _proof: Bytes) {}
/// Notifies about benign misbehaviour. /// Notifies about benign misbehaviour.
fn report_benign(&self, _validator: &Address) {} fn report_benign(&self, _validator: &Address, _block: BlockNumber) {}
/// Allows blockchain state access. /// Allows blockchain state access.
fn register_contract(&self, _client: Weak<Client>) {} fn register_contract(&self, _client: Weak<Client>) {}
} }

View File

@ -18,13 +18,14 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::sync::Weak; use std::sync::Weak;
use util::{H256, Address, RwLock}; use engines::{Call, EpochChange};
use util::{Bytes, H256, Address, RwLock};
use ids::BlockId; use ids::BlockId;
use header::BlockNumber; use header::{BlockNumber, Header};
use client::{Client, BlockChainClient}; use client::{Client, BlockChainClient};
use super::ValidatorSet; use super::ValidatorSet;
type BlockNumberLookup = Box<Fn(&H256) -> Result<BlockNumber, String> + Send + Sync + 'static>; type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;
pub struct Multi { pub struct Multi {
sets: BTreeMap<BlockNumber, Box<ValidatorSet>>, sets: BTreeMap<BlockNumber, Box<ValidatorSet>>,
@ -40,64 +41,91 @@ impl Multi {
} }
} }
fn correct_set(&self, bh: &H256) -> Option<&Box<ValidatorSet>> { fn correct_set(&self, id: BlockId) -> Option<&ValidatorSet> {
match self match self.block_number.read()(id).map(|parent_block| self.correct_set_by_number(parent_block)) {
.block_number Ok((_, set)) => Some(set),
.read()(bh)
.map(|parent_block| self
.sets
.iter()
.rev()
.find(|&(block, _)| *block <= parent_block + 1)
.expect("constructor validation ensures that there is at least one validator set for block 0;
block 0 is less than any uint;
qed")
) {
Ok((block, set)) => {
trace!(target: "engine", "Multi ValidatorSet retrieved for block {}.", block);
Some(set)
},
Err(e) => { Err(e) => {
debug!(target: "engine", "ValidatorSet could not be recovered: {}", e); debug!(target: "engine", "ValidatorSet could not be recovered: {}", e);
None None
}, },
} }
} }
// get correct set by block number, along with block number at which
// this set was activated.
fn correct_set_by_number(&self, parent_block: BlockNumber) -> (BlockNumber, &ValidatorSet) {
let (block, set) = self.sets.iter()
.rev()
.find(|&(block, _)| *block <= parent_block + 1)
.expect("constructor validation ensures that there is at least one validator set for block 0;
block 0 is less than any uint;
qed");
trace!(target: "engine", "Multi ValidatorSet retrieved for block {}.", block);
(*block, &**set)
}
} }
impl ValidatorSet for Multi { impl ValidatorSet for Multi {
fn contains(&self, bh: &H256, address: &Address) -> bool { fn default_caller(&self, block_id: BlockId) -> Box<Call> {
self.correct_set(bh).map_or(false, |set| set.contains(bh, address)) self.correct_set(block_id).map(|set| set.default_caller(block_id))
.unwrap_or(Box::new(|_, _| Err("No validator set for given ID.".into())))
} }
fn get(&self, bh: &H256, nonce: usize) -> Address { fn is_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
self.correct_set(bh).map_or_else(Default::default, |set| set.get(bh, nonce)) -> EpochChange
} {
let (set_block, set) = self.correct_set_by_number(header.number());
fn count(&self, bh: &H256) -> usize { match set.is_epoch_end(header, block, receipts) {
self.correct_set(bh).map_or_else(usize::max_value, |set| set.count(bh)) EpochChange::Yes(num, proof) => EpochChange::Yes(set_block + num, proof),
} other => other,
fn report_malicious(&self, validator: &Address) {
for set in self.sets.values() {
set.report_malicious(validator);
} }
} }
fn report_benign(&self, validator: &Address) { fn epoch_proof(&self, header: &Header, caller: &Call) -> Result<Vec<u8>, String> {
for set in self.sets.values() { self.correct_set_by_number(header.number()).1.epoch_proof(header, caller)
set.report_benign(validator);
} }
fn epoch_set(&self, header: &Header, proof: &[u8]) -> Result<(u64, super::SimpleList), ::error::Error> {
// "multi" epoch is the inner set's epoch plus the transition block to that set.
// ensures epoch increases monotonically.
let (set_block, set) = self.correct_set_by_number(header.number());
let (inner_epoch, list) = set.epoch_set(header, proof)?;
Ok((set_block + inner_epoch, list))
}
fn contains_with_caller(&self, bh: &H256, address: &Address, caller: &Call) -> bool {
self.correct_set(BlockId::Hash(*bh))
.map_or(false, |set| set.contains_with_caller(bh, address, caller))
}
fn get_with_caller(&self, bh: &H256, nonce: usize, caller: &Call) -> Address {
self.correct_set(BlockId::Hash(*bh))
.map_or_else(Default::default, |set| set.get_with_caller(bh, nonce, caller))
}
fn count_with_caller(&self, bh: &H256, caller: &Call) -> usize {
self.correct_set(BlockId::Hash(*bh))
.map_or_else(usize::max_value, |set| set.count_with_caller(bh, caller))
}
fn report_malicious(&self, validator: &Address, block: BlockNumber, proof: Bytes) {
self.correct_set_by_number(block).1.report_malicious(validator, block, proof);
}
fn report_benign(&self, validator: &Address, block: BlockNumber) {
self.correct_set_by_number(block).1.report_benign(validator, block);
} }
fn register_contract(&self, client: Weak<Client>) { fn register_contract(&self, client: Weak<Client>) {
for set in self.sets.values() { for set in self.sets.values() {
set.register_contract(client.clone()); set.register_contract(client.clone());
} }
*self.block_number.write() = Box::new(move |hash| client *self.block_number.write() = Box::new(move |id| client
.upgrade() .upgrade()
.ok_or("No client!".into()) .ok_or("No client!".into())
.and_then(|c| c.block_number(BlockId::Hash(*hash)).ok_or("Unknown block".into()))); .and_then(|c| c.block_number(id).ok_or("Unknown block".into())));
} }
} }

View File

@ -17,24 +17,45 @@
/// Validator set maintained in a contract, updated using `getValidators` method. /// Validator set maintained in a contract, updated using `getValidators` method.
use std::sync::Weak; use std::sync::Weak;
use ethabi; use futures::Future;
use native_contracts::ValidatorSet as Provider;
use util::*; use util::*;
use util::cache::MemoryLruCache; use util::cache::MemoryLruCache;
use types::ids::BlockId;
use basic_types::LogBloom;
use client::{Client, BlockChainClient}; use client::{Client, BlockChainClient};
use engines::Call;
use header::Header;
use ids::BlockId;
use log_entry::LogEntry;
use super::ValidatorSet; use super::ValidatorSet;
use super::simple_list::SimpleList; use super::simple_list::SimpleList;
const MEMOIZE_CAPACITY: usize = 500; const MEMOIZE_CAPACITY: usize = 500;
const CONTRACT_INTERFACE: &'static [u8] = b"[{\"constant\":true,\"inputs\":[],\"name\":\"getValidators\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"type\":\"function\"}]";
const GET_VALIDATORS: &'static str = "getValidators"; // TODO: ethabi should be able to generate this.
const EVENT_NAME: &'static [u8] = &*b"ValidatorsChanged(bytes32,uint256,address[])";
lazy_static! {
static ref EVENT_NAME_HASH: H256 = EVENT_NAME.sha3();
}
/// The validator contract should have the following interface: /// The validator contract should have the following interface:
/// [{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"}]
pub struct ValidatorSafeContract { pub struct ValidatorSafeContract {
pub address: Address, pub address: Address,
validators: RwLock<MemoryLruCache<H256, SimpleList>>, validators: RwLock<MemoryLruCache<H256, SimpleList>>,
provider: RwLock<Option<provider::Contract>>, provider: Provider,
client: RwLock<Option<Weak<Client>>>, // TODO [keorn]: remove
}
fn encode_proof(nonce: U256, validators: &[Address]) -> Bytes {
use rlp::RlpStream;
let mut stream = RlpStream::new_list(2);
stream.append(&nonce).append_list(validators);
stream.drain().to_vec()
} }
impl ValidatorSafeContract { impl ValidatorSafeContract {
@ -42,14 +63,14 @@ impl ValidatorSafeContract {
ValidatorSafeContract { ValidatorSafeContract {
address: contract_address, address: contract_address,
validators: RwLock::new(MemoryLruCache::new(MEMOIZE_CAPACITY)), validators: RwLock::new(MemoryLruCache::new(MEMOIZE_CAPACITY)),
provider: RwLock::new(None), provider: Provider::new(contract_address),
client: RwLock::new(None),
} }
} }
/// Queries the state and gets the set of validators. /// Queries the state and gets the set of validators.
fn get_list(&self, block_hash: H256) -> Option<SimpleList> { fn get_list(&self, caller: &Call) -> Option<SimpleList> {
if let Some(ref provider) = *self.provider.read() { match self.provider.get_validators(caller).wait() {
match provider.get_validators(BlockId::Hash(block_hash)) {
Ok(new) => { Ok(new) => {
debug!(target: "engine", "Set of validators obtained: {:?}", new); debug!(target: "engine", "Set of validators obtained: {:?}", new);
Some(SimpleList::new(new)) Some(SimpleList::new(new))
@ -59,22 +80,151 @@ impl ValidatorSafeContract {
None None
}, },
} }
} else { }
warn!(target: "engine", "Set of validators could not be updated: no provider contract.");
/// Queries for the current validator set transition nonce.
fn get_nonce(&self, caller: &Call) -> Option<::util::U256> {
match self.provider.transition_nonce(caller).wait() {
Ok(nonce) => Some(nonce),
Err(s) => {
debug!(target: "engine", "Unable to fetch transition nonce: {}", s);
None None
} }
} }
}
// Whether the header matches the expected bloom.
//
// The expected log should have 3 topics:
// 1. ETHABI-encoded log name.
// 2. the block's parent hash.
// 3. the "nonce": n for the nth transition in history.
//
// We can only search for the first 2, since we don't have the third
// just yet.
//
// The parent hash is included to prevent
// malicious actors from brute forcing other logs that would
// produce the same bloom.
//
// The log data is an array of all new validator addresses.
fn expected_bloom(&self, header: &Header) -> LogBloom {
LogEntry {
address: self.address,
topics: vec![*EVENT_NAME_HASH, *header.parent_hash()],
data: Vec::new(), // irrelevant for bloom.
}.bloom()
}
} }
impl ValidatorSet for ValidatorSafeContract { impl ValidatorSet for ValidatorSafeContract {
fn contains(&self, block_hash: &H256, address: &Address) -> bool { fn default_caller(&self, id: BlockId) -> Box<Call> {
let client = self.client.read().clone();
Box::new(move |addr, data| client.as_ref()
.and_then(Weak::upgrade)
.ok_or("No client!".into())
.and_then(|c| c.call_contract(id, addr, data)))
}
fn is_epoch_end(&self, header: &Header, _block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
-> ::engines::EpochChange
{
let bloom = self.expected_bloom(header);
let header_bloom = header.log_bloom();
if &bloom & header_bloom != bloom { return ::engines::EpochChange::No }
match receipts {
None => ::engines::EpochChange::Unsure(::engines::Unsure::NeedsReceipts),
Some(receipts) => {
let check_log = |log: &LogEntry| {
log.address == self.address &&
log.topics.len() == 3 &&
log.topics[0] == *EVENT_NAME_HASH &&
log.topics[1] == *header.parent_hash()
// don't have anything to compare nonce to yet.
};
let event = Provider::contract(&self.provider)
.event("ValidatorsChanged".into())
.expect("Contract known ahead of time to have `ValidatorsChanged` event; qed");
// iterate in reverse because only the _last_ change in a given
// block actually has any effect.
// the contract should only increment the nonce once.
let mut decoded_events = receipts.iter()
.rev()
.filter(|r| &bloom & &r.log_bloom == bloom)
.flat_map(|r| r.logs.iter())
.filter(move |l| check_log(l))
.filter_map(|log| {
let topics = log.topics.iter().map(|x| x.0.clone()).collect();
match event.decode_log(topics, log.data.clone()) {
Ok(decoded) => Some(decoded),
Err(_) => None,
}
});
match decoded_events.next() {
None => ::engines::EpochChange::No,
Some(matched_event) => {
// decode log manually until the native contract generator is
// good enough to do it for us.
let &(_, _, ref nonce_token) = &matched_event.params[1];
let &(_, _, ref validators_token) = &matched_event.params[2];
let nonce: Option<U256> = nonce_token.clone().to_uint()
.map(H256).map(Into::into);
let validators = validators_token.clone().to_array()
.and_then(|a| a.into_iter()
.map(|x| x.to_address().map(H160))
.collect::<Option<Vec<_>>>()
);
match (nonce, validators) {
(Some(nonce), Some(validators)) => {
let proof = encode_proof(nonce, &validators);
let new_epoch = nonce.low_u64();
::engines::EpochChange::Yes(new_epoch, proof)
}
_ => {
debug!(target: "engine", "Successfully decoded log turned out to be bad.");
::engines::EpochChange::No
}
}
}
}
}
}
}
// the proof we generate is an RLP list containing two parts.
// (nonce, validators)
fn epoch_proof(&self, _header: &Header, caller: &Call) -> Result<Vec<u8>, String> {
match (self.get_nonce(caller), self.get_list(caller)) {
(Some(nonce), Some(list)) => Ok(encode_proof(nonce, &list.into_inner())),
_ => Err("Caller insufficient to generate validator proof.".into()),
}
}
fn epoch_set(&self, _header: &Header, proof: &[u8]) -> Result<(u64, SimpleList), ::error::Error> {
use rlp::UntrustedRlp;
let rlp = UntrustedRlp::new(proof);
let nonce: u64 = rlp.val_at(0)?;
let validators: Vec<Address> = rlp.list_at(1)?;
Ok((nonce, SimpleList::new(validators)))
}
fn contains_with_caller(&self, block_hash: &H256, address: &Address, caller: &Call) -> bool {
let mut guard = self.validators.write(); let mut guard = self.validators.write();
let maybe_existing = guard let maybe_existing = guard
.get_mut(block_hash) .get_mut(block_hash)
.map(|list| list.contains(block_hash, address)); .map(|list| list.contains(block_hash, address));
maybe_existing maybe_existing
.unwrap_or_else(|| self .unwrap_or_else(|| self
.get_list(block_hash.clone()) .get_list(caller)
.map_or(false, |list| { .map_or(false, |list| {
let contains = list.contains(block_hash, address); let contains = list.contains(block_hash, address);
guard.insert(block_hash.clone(), list); guard.insert(block_hash.clone(), list);
@ -82,14 +232,14 @@ impl ValidatorSet for ValidatorSafeContract {
})) }))
} }
fn get(&self, block_hash: &H256, nonce: usize) -> Address { fn get_with_caller(&self, block_hash: &H256, nonce: usize, caller: &Call) -> Address {
let mut guard = self.validators.write(); let mut guard = self.validators.write();
let maybe_existing = guard let maybe_existing = guard
.get_mut(block_hash) .get_mut(block_hash)
.map(|list| list.get(block_hash, nonce)); .map(|list| list.get(block_hash, nonce));
maybe_existing maybe_existing
.unwrap_or_else(|| self .unwrap_or_else(|| self
.get_list(block_hash.clone()) .get_list(caller)
.map_or_else(Default::default, |list| { .map_or_else(Default::default, |list| {
let address = list.get(block_hash, nonce); let address = list.get(block_hash, nonce);
guard.insert(block_hash.clone(), list); guard.insert(block_hash.clone(), list);
@ -97,14 +247,14 @@ impl ValidatorSet for ValidatorSafeContract {
})) }))
} }
fn count(&self, block_hash: &H256) -> usize { fn count_with_caller(&self, block_hash: &H256, caller: &Call) -> usize {
let mut guard = self.validators.write(); let mut guard = self.validators.write();
let maybe_existing = guard let maybe_existing = guard
.get_mut(block_hash) .get_mut(block_hash)
.map(|list| list.count(block_hash)); .map(|list| list.count(block_hash));
maybe_existing maybe_existing
.unwrap_or_else(|| self .unwrap_or_else(|| self
.get_list(block_hash.clone()) .get_list(caller)
.map_or_else(usize::max_value, |list| { .map_or_else(usize::max_value, |list| {
let address = list.count(block_hash); let address = list.count(block_hash);
guard.insert(block_hash.clone(), list); guard.insert(block_hash.clone(), list);
@ -114,55 +264,7 @@ impl ValidatorSet for ValidatorSafeContract {
fn register_contract(&self, client: Weak<Client>) { fn register_contract(&self, client: Weak<Client>) {
trace!(target: "engine", "Setting up contract caller."); trace!(target: "engine", "Setting up contract caller.");
let contract = ethabi::Contract::new(ethabi::Interface::load(CONTRACT_INTERFACE).expect("JSON interface is valid; qed")); *self.client.write() = Some(client);
let call = contract.function(GET_VALIDATORS.into()).expect("Method name is valid; qed");
let data = call.encode_call(vec![]).expect("get_validators does not take any arguments; qed");
let contract_address = self.address.clone();
let do_call = move |id| client
.upgrade()
.ok_or("No client!".into())
.and_then(|c| c.call_contract(id, contract_address.clone(), data.clone()))
.map(|raw_output| call.decode_output(raw_output).expect("ethabi is correct; qed"));
*self.provider.write() = Some(provider::Contract::new(do_call));
}
}
mod provider {
use std::string::String;
use std::result::Result;
use {util, ethabi};
use types::ids::BlockId;
pub struct Contract {
do_call: Box<Fn(BlockId) -> Result<Vec<ethabi::Token>, String> + Send + Sync + 'static>,
}
impl Contract {
pub fn new<F>(do_call: F) -> Self where F: Fn(BlockId) -> Result<Vec<ethabi::Token>, String> + Send + Sync + 'static {
Contract {
do_call: Box::new(do_call),
}
}
/// Gets validators from contract with interface: `{"constant":true,"inputs":[],"name":"getValidators","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"}`
pub fn get_validators(&self, id: BlockId) -> Result<Vec<util::Address>, String> {
Ok((self.do_call)(id)?
.into_iter()
.rev()
.collect::<Vec<_>>()
.pop()
.expect("get_validators returns one argument; qed")
.to_array()
.and_then(|v| v
.into_iter()
.map(|a| a.to_address())
.collect::<Option<Vec<[u8; 20]>>>())
.expect("get_validators returns a list of addresses; qed")
.into_iter()
.map(util::Address::from)
.collect::<Vec<_>>()
)
}
} }
} }
@ -178,7 +280,7 @@ mod tests {
use miner::MinerService; use miner::MinerService;
use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data}; use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data};
use super::super::ValidatorSet; use super::super::ValidatorSet;
use super::ValidatorSafeContract; use super::{ValidatorSafeContract, EVENT_NAME_HASH};
#[test] #[test]
fn fetches_validators() { fn fetches_validators() {
@ -196,6 +298,7 @@ mod tests {
let s0 = Secret::from_slice(&"1".sha3()).unwrap(); let s0 = Secret::from_slice(&"1".sha3()).unwrap();
let v0 = tap.insert_account(s0.clone(), "").unwrap(); let v0 = tap.insert_account(s0.clone(), "").unwrap();
let v1 = tap.insert_account(Secret::from_slice(&"0".sha3()).unwrap(), "").unwrap(); let v1 = tap.insert_account(Secret::from_slice(&"0".sha3()).unwrap(), "").unwrap();
let network_id = Spec::new_validator_safe_contract().network_id();
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap)); let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap));
client.engine().register_client(Arc::downgrade(&client)); client.engine().register_client(Arc::downgrade(&client));
let validator_contract = Address::from_str("0000000000000000000000000000000000000005").unwrap(); let validator_contract = Address::from_str("0000000000000000000000000000000000000005").unwrap();
@ -209,7 +312,7 @@ mod tests {
action: Action::Call(validator_contract), action: Action::Call(validator_contract),
value: 0.into(), value: 0.into(),
data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
}.sign(&s0, None); }.sign(&s0, Some(network_id));
client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
client.update_sealing(); client.update_sealing();
assert_eq!(client.chain_info().best_block_number, 1); assert_eq!(client.chain_info().best_block_number, 1);
@ -221,7 +324,7 @@ mod tests {
action: Action::Call(validator_contract), action: Action::Call(validator_contract),
value: 0.into(), value: 0.into(),
data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(), data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
}.sign(&s0, None); }.sign(&s0, Some(network_id));
client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
client.update_sealing(); client.update_sealing();
// The transaction is not yet included so still unable to seal. // The transaction is not yet included so still unable to seal.
@ -240,7 +343,7 @@ mod tests {
action: Action::Call(Address::default()), action: Action::Call(Address::default()),
value: 0.into(), value: 0.into(),
data: Vec::new(), data: Vec::new(),
}.sign(&s0, None); }.sign(&s0, Some(network_id));
client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap(); client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
client.update_sealing(); client.update_sealing();
// Able to seal again. // Able to seal again.
@ -255,4 +358,35 @@ mod tests {
sync_client.flush_queue(); sync_client.flush_queue();
assert_eq!(sync_client.chain_info().best_block_number, 3); assert_eq!(sync_client.chain_info().best_block_number, 3);
} }
#[test]
fn detects_bloom() {
use header::Header;
use engines::{EpochChange, Unsure};
use log_entry::LogEntry;
let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, None);
let engine = client.engine().clone();
let validator_contract = Address::from_str("0000000000000000000000000000000000000005").unwrap();
let last_hash = client.best_block_header().hash();
let mut new_header = Header::default();
new_header.set_parent_hash(last_hash);
// first, try without the parent hash.
let mut event = LogEntry {
address: validator_contract,
topics: vec![*EVENT_NAME_HASH],
data: Vec::new(),
};
new_header.set_log_bloom(event.bloom());
assert_eq!(engine.is_epoch_end(&new_header, None, None), EpochChange::No);
// with the last hash, it should need the receipts.
event.topics.push(last_hash);
new_header.set_log_bloom(event.bloom());
assert_eq!(engine.is_epoch_end(&new_header, None, None),
EpochChange::Unsure(Unsure::NeedsReceipts));
}
} }

View File

@ -17,40 +17,67 @@
/// Preconfigured validator list. /// Preconfigured validator list.
use util::{H256, Address, HeapSizeOf}; use util::{H256, Address, HeapSizeOf};
use engines::Call;
use header::Header;
use super::ValidatorSet; use super::ValidatorSet;
#[derive(Debug, PartialEq, Eq, Default)] /// Validator set containing a known set of addresses.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
pub struct SimpleList { pub struct SimpleList {
validators: Vec<Address>, validators: Vec<Address>,
validator_n: usize,
} }
impl SimpleList { impl SimpleList {
/// Create a new `SimpleList`.
pub fn new(validators: Vec<Address>) -> Self { pub fn new(validators: Vec<Address>) -> Self {
SimpleList { SimpleList {
validator_n: validators.len(),
validators: validators, validators: validators,
} }
} }
/// Convert into inner representation.
pub fn into_inner(self) -> Vec<Address> {
self.validators
}
} }
impl HeapSizeOf for SimpleList { impl HeapSizeOf for SimpleList {
fn heap_size_of_children(&self) -> usize { fn heap_size_of_children(&self) -> usize {
self.validators.heap_size_of_children() + self.validator_n.heap_size_of_children() self.validators.heap_size_of_children()
} }
} }
impl ValidatorSet for SimpleList { impl ValidatorSet for SimpleList {
fn contains(&self, _bh: &H256, address: &Address) -> bool { fn default_caller(&self, _block_id: ::ids::BlockId) -> Box<Call> {
Box::new(|_, _| Err("Simple list doesn't require calls.".into()))
}
fn is_epoch_end(&self, _header: &Header, _block: Option<&[u8]>, _receipts: Option<&[::receipt::Receipt]>)
-> ::engines::EpochChange
{
::engines::EpochChange::No
}
fn epoch_proof(&self, _header: &Header, _caller: &Call) -> Result<Vec<u8>, String> {
Ok(Vec::new())
}
fn epoch_set(&self, _header: &Header, _: &[u8]) -> Result<(u64, SimpleList), ::error::Error> {
Ok((0, self.clone()))
}
fn contains_with_caller(&self, _bh: &H256, address: &Address, _: &Call) -> bool {
self.validators.contains(address) self.validators.contains(address)
} }
fn get(&self, _bh: &H256, nonce: usize) -> Address { fn get_with_caller(&self, _bh: &H256, nonce: usize, _: &Call) -> Address {
self.validators.get(nonce % self.validator_n).expect("There are validator_n authorities; taking number modulo validator_n gives number in validator_n range; qed").clone() let validator_n = self.validators.len();
self.validators.get(nonce % validator_n).expect("There are validator_n authorities; taking number modulo validator_n gives number in validator_n range; qed").clone()
} }
fn count(&self, _bh: &H256) -> usize { fn count_with_caller(&self, _bh: &H256, _: &Call) -> usize {
self.validator_n self.validators.len()
} }
} }

View File

@ -18,7 +18,7 @@
use std::fmt::Debug; use std::fmt::Debug;
use util::*; use util::*;
use rlp::Encodable; use rlp::{Encodable, RlpStream};
pub trait Message: Clone + PartialEq + Eq + Hash + Encodable + Debug { pub trait Message: Clone + PartialEq + Eq + Hash + Encodable + Debug {
type Round: Clone + PartialEq + Eq + Hash + Default + Debug + Ord; type Round: Clone + PartialEq + Eq + Hash + Default + Debug + Ord;
@ -40,25 +40,44 @@ pub struct VoteCollector<M: Message> {
#[derive(Debug, Default)] #[derive(Debug, Default)]
struct StepCollector<M: Message> { struct StepCollector<M: Message> {
voted: HashSet<Address>, voted: HashMap<Address, M>,
pub block_votes: HashMap<Option<H256>, HashMap<H520, Address>>, pub block_votes: HashMap<Option<H256>, HashMap<H520, Address>>,
messages: HashSet<M>, messages: HashSet<M>,
} }
#[derive(Debug)]
pub struct DoubleVote<'a, M: Message> {
pub author: &'a Address,
vote_one: M,
vote_two: M,
}
impl<'a, M: Message> Encodable for DoubleVote<'a, M> {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(2)
.append(&self.vote_one)
.append(&self.vote_two);
}
}
impl <M: Message> StepCollector<M> { impl <M: Message> StepCollector<M> {
/// Returns Some(&Address) when validator is double voting. /// Returns Some(&Address) when validator is double voting.
fn insert<'a>(&mut self, message: M, address: &'a Address) -> Option<&'a Address> { fn insert<'a>(&mut self, message: M, address: &'a Address) -> Option<DoubleVote<'a, M>> {
// Do nothing when message was seen. // Do nothing when message was seen.
if self.messages.insert(message.clone()) { if self.messages.insert(message.clone()) {
if self.voted.insert(address.clone()) { if let Some(previous) = self.voted.insert(address.clone(), message.clone()) {
// Bad validator sent a different message.
return Some(DoubleVote {
author: address,
vote_one: previous,
vote_two: message
});
} else {
self self
.block_votes .block_votes
.entry(message.block_hash()) .entry(message.block_hash())
.or_insert_with(HashMap::new) .or_insert_with(HashMap::new)
.insert(message.signature(), address.clone()); .insert(message.signature(), address.clone());
} else {
// Bad validator sent a different message.
return Some(address);
} }
} }
None None
@ -101,7 +120,7 @@ impl <M: Message + Default> Default for VoteCollector<M> {
impl <M: Message + Default + Encodable + Debug> VoteCollector<M> { impl <M: Message + Default + Encodable + Debug> VoteCollector<M> {
/// Insert vote if it is newer than the oldest one. /// Insert vote if it is newer than the oldest one.
pub fn vote<'a>(&self, message: M, voter: &'a Address) -> Option<&'a Address> { pub fn vote<'a>(&self, message: M, voter: &'a Address) -> Option<DoubleVote<'a, M>> {
self self
.votes .votes
.write() .write()
@ -220,11 +239,11 @@ mod tests {
} }
fn random_vote(collector: &VoteCollector<TestMessage>, signature: H520, step: TestStep, block_hash: Option<H256>) -> bool { fn random_vote(collector: &VoteCollector<TestMessage>, signature: H520, step: TestStep, block_hash: Option<H256>) -> bool {
full_vote(collector, signature, step, block_hash, &H160::random()).is_none() full_vote(collector, signature, step, block_hash, &H160::random())
} }
fn full_vote<'a>(collector: &VoteCollector<TestMessage>, signature: H520, step: TestStep, block_hash: Option<H256>, address: &'a Address) -> Option<&'a Address> { fn full_vote<'a>(collector: &VoteCollector<TestMessage>, signature: H520, step: TestStep, block_hash: Option<H256>, address: &'a Address) -> bool {
collector.vote(TestMessage { signature: signature, step: step, block_hash: block_hash }, address) collector.vote(TestMessage { signature: signature, step: step, block_hash: block_hash }, address).is_none()
} }
#[test] #[test]
@ -319,9 +338,9 @@ mod tests {
let collector = VoteCollector::default(); let collector = VoteCollector::default();
let round = 3; let round = 3;
// Vote is inserted fine. // Vote is inserted fine.
assert!(full_vote(&collector, H520::random(), round, Some("0".sha3()), &Address::default()).is_none()); assert!(full_vote(&collector, H520::random(), round, Some("0".sha3()), &Address::default()));
// Returns the double voting address. // Returns the double voting address.
full_vote(&collector, H520::random(), round, Some("1".sha3()), &Address::default()).unwrap(); assert!(!full_vote(&collector, H520::random(), round, Some("1".sha3()), &Address::default()));
assert_eq!(collector.count_round_votes(&round), 1); assert_eq!(collector.count_round_votes(&round), 1);
} }
} }

View File

@ -19,8 +19,8 @@ use util::*;
use block::*; use block::*;
use builtin::Builtin; use builtin::Builtin;
use env_info::EnvInfo; use env_info::EnvInfo;
use error::{BlockError, TransactionError, Error}; use error::{BlockError, Error, TransactionError};
use header::Header; use header::{Header, BlockNumber};
use state::CleanupMode; use state::CleanupMode;
use spec::CommonParams; use spec::CommonParams;
use transaction::UnverifiedTransaction; use transaction::UnverifiedTransaction;
@ -32,6 +32,10 @@ use rlp::{self, UntrustedRlp};
/// Parity tries to round block.gas_limit to multiple of this constant /// Parity tries to round block.gas_limit to multiple of this constant
pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]); pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]);
/// Number of blocks in an ethash snapshot.
// make dependent on difficulty incrment divisor?
const SNAPSHOT_BLOCKS: u64 = 30000;
/// Ethash params. /// Ethash params.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub struct EthashParams { pub struct EthashParams {
@ -139,17 +143,33 @@ pub struct Ethash {
impl Ethash { impl Ethash {
/// Create a new instance of Ethash engine /// Create a new instance of Ethash engine
pub fn new(params: CommonParams, ethash_params: EthashParams, builtins: BTreeMap<Address, Builtin>) -> Self { pub fn new(params: CommonParams, ethash_params: EthashParams, builtins: BTreeMap<Address, Builtin>) -> Arc<Self> {
Ethash { Arc::new(Ethash {
params: params, params: params,
ethash_params: ethash_params, ethash_params: ethash_params,
builtins: builtins, builtins: builtins,
pow: EthashManager::new(), pow: EthashManager::new(),
} })
} }
} }
impl Engine for Ethash { // TODO [rphmeier]
//
// for now, this is different than Ethash's own epochs, and signal
// "consensus epochs".
// in this sense, `Ethash` is epochless: the same `EpochVerifier` can be used
// for any block in the chain.
// in the future, we might move the Ethash epoch
// caching onto this mechanism as well.
impl ::engines::EpochVerifier for Arc<Ethash> {
fn epoch_number(&self) -> u64 { 0 }
fn verify_light(&self, _header: &Header) -> Result<(), Error> { Ok(()) }
fn verify_heavy(&self, header: &Header) -> Result<(), Error> {
self.verify_block_unordered(header, None)
}
}
impl Engine for Arc<Ethash> {
fn name(&self) -> &str { "Ethash" } fn name(&self) -> &str { "Ethash" }
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) } fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
// Two fields - mix // Two fields - mix
@ -167,19 +187,20 @@ impl Engine for Ethash {
map!["nonce".to_owned() => format!("0x{}", header.nonce().hex()), "mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())] map!["nonce".to_owned() => format!("0x{}", header.nonce().hex()), "mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())]
} }
fn schedule(&self, env_info: &EnvInfo) -> Schedule { fn schedule(&self, block_number: BlockNumber) -> Schedule {
trace!(target: "client", "Creating schedule. fCML={}, bGCML={}", self.ethash_params.homestead_transition, self.ethash_params.eip150_transition); trace!(target: "client", "Creating schedule. fCML={}, bGCML={}", self.ethash_params.homestead_transition, self.ethash_params.eip150_transition);
if env_info.number < self.ethash_params.homestead_transition { if block_number < self.ethash_params.homestead_transition {
Schedule::new_frontier() Schedule::new_frontier()
} else if env_info.number < self.ethash_params.eip150_transition { } else if block_number < self.ethash_params.eip150_transition {
Schedule::new_homestead() Schedule::new_homestead()
} else { } else {
Schedule::new_post_eip150( Schedule::new_post_eip150(
self.ethash_params.max_code_size as usize, self.ethash_params.max_code_size as usize,
env_info.number >= self.ethash_params.eip160_transition, block_number >= self.ethash_params.eip160_transition,
env_info.number >= self.ethash_params.eip161abc_transition, block_number >= self.ethash_params.eip161abc_transition,
env_info.number >= self.ethash_params.eip161d_transition block_number >= self.ethash_params.eip161d_transition,
block_number >= self.params.eip86_transition
) )
} }
} }
@ -369,22 +390,23 @@ impl Engine for Ethash {
} }
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> result::Result<(), Error> { fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> result::Result<(), Error> {
if header.number() >= self.ethash_params.homestead_transition {
t.check_low_s()?;
}
if let Some(n) = t.network_id() {
if header.number() < self.ethash_params.eip155_transition || n != self.params().chain_id {
return Err(TransactionError::InvalidNetworkId.into())
}
}
if header.number() >= self.ethash_params.min_gas_price_transition && t.gas_price < self.ethash_params.min_gas_price { if header.number() >= self.ethash_params.min_gas_price_transition && t.gas_price < self.ethash_params.min_gas_price {
return Err(TransactionError::InsufficientGasPrice { minimal: self.ethash_params.min_gas_price, got: t.gas_price }.into()); return Err(TransactionError::InsufficientGasPrice { minimal: self.ethash_params.min_gas_price, got: t.gas_price }.into());
} }
let check_low_s = header.number() >= self.ethash_params.homestead_transition;
let network_id = if header.number() >= self.ethash_params.eip155_transition { Some(self.params().chain_id) } else { None };
t.verify_basic(check_low_s, network_id, false)?;
Ok(()) Ok(())
} }
fn epoch_verifier(&self, _header: &Header, _proof: &[u8]) -> Result<Box<::engines::EpochVerifier>, Error> {
Ok(Box::new(self.clone()))
}
fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
Some(Box::new(::snapshot::PowSnapshot(SNAPSHOT_BLOCKS)))
}
} }
// Try to round gas_limit a bit so that: // Try to round gas_limit a bit so that:
@ -512,7 +534,6 @@ mod tests {
use block::*; use block::*;
use tests::helpers::*; use tests::helpers::*;
use engines::Engine; use engines::Engine;
use env_info::EnvInfo;
use error::{BlockError, Error}; use error::{BlockError, Error};
use header::Header; use header::Header;
use super::super::{new_morden, new_homestead_test}; use super::super::{new_morden, new_homestead_test};
@ -524,8 +545,7 @@ mod tests {
let spec = new_morden(); let spec = new_morden();
let engine = &*spec.engine; let engine = &*spec.engine;
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close(); let b = b.close();
@ -537,8 +557,7 @@ mod tests {
let spec = new_morden(); let spec = new_morden();
let engine = &*spec.engine; let engine = &*spec.engine;
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let last_hashes = Arc::new(vec![genesis_header.hash()]); let last_hashes = Arc::new(vec![genesis_header.hash()]);
let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let mut uncle = Header::new(); let mut uncle = Header::new();
@ -561,28 +580,10 @@ mod tests {
#[test] #[test]
fn can_return_schedule() { fn can_return_schedule() {
let engine = new_morden().engine; let engine = new_morden().engine;
let schedule = engine.schedule(&EnvInfo { let schedule = engine.schedule(10000000);
number: 10000000,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0); assert!(schedule.stack_limit > 0);
let schedule = engine.schedule(&EnvInfo { let schedule = engine.schedule(100);
number: 100,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
});
assert!(!schedule.have_delegate_call); assert!(!schedule.have_delegate_call);
} }

View File

@ -94,8 +94,8 @@ mod tests {
let spec = new_morden(); let spec = new_morden();
let engine = &spec.engine; let engine = &spec.engine;
let genesis_header = spec.genesis_header(); let genesis_header = spec.genesis_header();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap(); let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap();
assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()).unwrap(), 1u64.into()); assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()).unwrap(), 1u64.into());
assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()).unwrap(), 1u64.into()); assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()).unwrap(), 1u64.into());

View File

@ -41,6 +41,17 @@ pub enum MessageCallResult {
Failed Failed
} }
/// Specifies how an address is calculated for a new contract.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum CreateContractAddress {
/// Address is calculated from nonce and sender. Pre EIP-86 (Metropolis)
FromSenderAndNonce,
/// Address is calculated from code hash. Default since EIP-86
FromCodeHash,
/// Address is calculated from code hash and sender. Used by CREATE_P2SH instruction.
FromSenderAndCodeHash,
}
/// Externalities interface for EVMs /// Externalities interface for EVMs
// TODO: [rob] associated error type instead of `trie::Result`. Not all EVMs are trie powered. // TODO: [rob] associated error type instead of `trie::Result`. Not all EVMs are trie powered.
pub trait Ext { pub trait Ext {
@ -68,7 +79,7 @@ pub trait Ext {
/// Creates new contract. /// Creates new contract.
/// ///
/// Returns gas_left and contract address if contract creation was succesfull. /// Returns gas_left and contract address if contract creation was succesfull.
fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult; fn create(&mut self, gas: &U256, value: &U256, code: &[u8], address: CreateContractAddress) -> ContractCreateResult;
/// Message call. /// Message call.
/// ///

View File

@ -278,6 +278,7 @@ lazy_static! {
arr[RETURN as usize] = InstructionInfo::new("RETURN", 0, 2, 0, true, GasPriceTier::Zero); arr[RETURN as usize] = InstructionInfo::new("RETURN", 0, 2, 0, true, GasPriceTier::Zero);
arr[DELEGATECALL as usize] = InstructionInfo::new("DELEGATECALL", 0, 6, 1, true, GasPriceTier::Special); arr[DELEGATECALL as usize] = InstructionInfo::new("DELEGATECALL", 0, 6, 1, true, GasPriceTier::Special);
arr[SUICIDE as usize] = InstructionInfo::new("SUICIDE", 0, 1, 0, true, GasPriceTier::Special); arr[SUICIDE as usize] = InstructionInfo::new("SUICIDE", 0, 1, 0, true, GasPriceTier::Special);
arr[CREATE2 as usize] = InstructionInfo::new("CREATE2", 0, 3, 1, true, GasPriceTier::Special);
arr arr
}; };
} }
@ -553,6 +554,8 @@ pub const CALLCODE: Instruction = 0xf2;
pub const RETURN: Instruction = 0xf3; pub const RETURN: Instruction = 0xf3;
/// like CALLCODE but keeps caller's value and sender /// like CALLCODE but keeps caller's value and sender
pub const DELEGATECALL: Instruction = 0xf4; pub const DELEGATECALL: Instruction = 0xf4;
/// create a new account and set creation address to sha3(sender + sha3(init code)) % 2**160
pub const CREATE2: Instruction = 0xfb;
/// halt execution and register account for later deletion /// halt execution and register account for later deletion
pub const SUICIDE: Instruction = 0xff; pub const SUICIDE: Instruction = 0xff;

View File

@ -223,7 +223,7 @@ impl<Gas: CostType> Gasometer<Gas> {
Request::GasMemProvide(gas, mem, Some(requested)) Request::GasMemProvide(gas, mem, Some(requested))
}, },
instructions::CREATE => { instructions::CREATE | instructions::CREATE2 => {
let gas = Gas::from(schedule.create_gas); let gas = Gas::from(schedule.create_gas);
let mem = mem_needed(stack.peek(1), stack.peek(2))?; let mem = mem_needed(stack.peek(1), stack.peek(2))?;

View File

@ -32,7 +32,7 @@ use std::marker::PhantomData;
use action_params::{ActionParams, ActionValue}; use action_params::{ActionParams, ActionValue};
use types::executed::CallType; use types::executed::CallType;
use evm::instructions::{self, Instruction, InstructionInfo}; use evm::instructions::{self, Instruction, InstructionInfo};
use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType}; use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType, CreateContractAddress};
use bit_set::BitSet; use bit_set::BitSet;
use util::*; use util::*;
@ -182,7 +182,9 @@ impl<Cost: CostType> Interpreter<Cost> {
fn verify_instruction(&self, ext: &evm::Ext, instruction: Instruction, info: &InstructionInfo, stack: &Stack<U256>) -> evm::Result<()> { fn verify_instruction(&self, ext: &evm::Ext, instruction: Instruction, info: &InstructionInfo, stack: &Stack<U256>) -> evm::Result<()> {
let schedule = ext.schedule(); let schedule = ext.schedule();
if !schedule.have_delegate_call && instruction == instructions::DELEGATECALL { if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) ||
(instruction == instructions::CREATE2 && !schedule.have_create2) {
return Err(evm::Error::BadInstruction { return Err(evm::Error::BadInstruction {
instruction: instruction instruction: instruction
}); });
@ -266,10 +268,12 @@ impl<Cost: CostType> Interpreter<Cost> {
instructions::JUMPDEST => { instructions::JUMPDEST => {
// ignore // ignore
}, },
instructions::CREATE => { instructions::CREATE | instructions::CREATE2 => {
let endowment = stack.pop_back(); let endowment = stack.pop_back();
let init_off = stack.pop_back(); let init_off = stack.pop_back();
let init_size = stack.pop_back(); let init_size = stack.pop_back();
let address_scheme = if instruction == instructions::CREATE { CreateContractAddress::FromSenderAndNonce } else { CreateContractAddress::FromSenderAndCodeHash };
let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed"); let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed");
let contract_code = self.mem.read_slice(init_off, init_size); let contract_code = self.mem.read_slice(init_off, init_size);
@ -280,7 +284,7 @@ impl<Cost: CostType> Interpreter<Cost> {
return Ok(InstructionResult::UnusedGas(create_gas)); return Ok(InstructionResult::UnusedGas(create_gas));
} }
let create_result = ext.create(&create_gas.as_u256(), &endowment, contract_code); let create_result = ext.create(&create_gas.as_u256(), &endowment, contract_code, address_scheme);
return match create_result { return match create_result {
ContractCreateResult::Created(address, gas_left) => { ContractCreateResult::Created(address, gas_left) => {
stack.push(address_to_u256(address)); stack.push(address_to_u256(address));

View File

@ -32,7 +32,7 @@ mod tests;
mod benches; mod benches;
pub use self::evm::{Evm, Error, Finalize, GasLeft, Result, CostType}; pub use self::evm::{Evm, Error, Finalize, GasLeft, Result, CostType};
pub use self::ext::{Ext, ContractCreateResult, MessageCallResult}; pub use self::ext::{Ext, ContractCreateResult, MessageCallResult, CreateContractAddress};
pub use self::factory::{Factory, VMType}; pub use self::factory::{Factory, VMType};
pub use self::schedule::Schedule; pub use self::schedule::Schedule;
pub use types::executed::CallType; pub use types::executed::CallType;

View File

@ -22,6 +22,8 @@ pub struct Schedule {
pub exceptional_failed_code_deposit: bool, pub exceptional_failed_code_deposit: bool,
/// Does it have a delegate cal /// Does it have a delegate cal
pub have_delegate_call: bool, pub have_delegate_call: bool,
/// Does it have a CREATE_P2SH instruction
pub have_create2: bool,
/// VM stack limit /// VM stack limit
pub stack_limit: usize, pub stack_limit: usize,
/// Max number of nested calls/creates /// Max number of nested calls/creates
@ -113,10 +115,11 @@ impl Schedule {
} }
/// Schedule for the post-EIP-150-era of the Ethereum main net. /// Schedule for the post-EIP-150-era of the Ethereum main net.
pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool) -> Schedule { pub fn new_post_eip150(max_code_size: usize, fix_exp: bool, no_empty: bool, kill_empty: bool, have_metropolis_instructions: bool) -> Schedule {
Schedule { Schedule {
exceptional_failed_code_deposit: true, exceptional_failed_code_deposit: true,
have_delegate_call: true, have_delegate_call: true,
have_create2: have_metropolis_instructions,
stack_limit: 1024, stack_limit: 1024,
max_depth: 1024, max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],
@ -158,10 +161,16 @@ impl Schedule {
} }
} }
/// Schedule for the Metropolis of the Ethereum main net.
pub fn new_metropolis() -> Schedule {
Self::new_post_eip150(24576, true, true, true, true)
}
fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule { fn new(efcd: bool, hdc: bool, tcg: usize) -> Schedule {
Schedule { Schedule {
exceptional_failed_code_deposit: efcd, exceptional_failed_code_deposit: efcd,
have_delegate_call: hdc, have_delegate_call: hdc,
have_create2: false,
stack_limit: 1024, stack_limit: 1024,
max_depth: 1024, max_depth: 1024,
tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0], tier_step_gas: [0, 2, 3, 5, 8, 10, 20, 0],

View File

@ -18,7 +18,7 @@ use util::*;
use action_params::{ActionParams, ActionValue}; use action_params::{ActionParams, ActionValue};
use env_info::EnvInfo; use env_info::EnvInfo;
use types::executed::CallType; use types::executed::CallType;
use evm::{self, Ext, Schedule, Factory, GasLeft, VMType, ContractCreateResult, MessageCallResult}; use evm::{self, Ext, Schedule, Factory, GasLeft, VMType, ContractCreateResult, MessageCallResult, CreateContractAddress};
use std::fmt::Debug; use std::fmt::Debug;
pub struct FakeLogEntry { pub struct FakeLogEntry {
@ -111,7 +111,7 @@ impl Ext for FakeExt {
self.blockhashes.get(number).unwrap_or(&H256::new()).clone() self.blockhashes.get(number).unwrap_or(&H256::new()).clone()
} }
fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { fn create(&mut self, gas: &U256, value: &U256, code: &[u8], _address: CreateContractAddress) -> ContractCreateResult {
self.calls.insert(FakeCall { self.calls.insert(FakeCall {
call_type: FakeCallType::Create, call_type: FakeCallType::Create,
gas: *gas, gas: *gas,

View File

@ -22,7 +22,7 @@ use engines::Engine;
use types::executed::CallType; use types::executed::CallType;
use env_info::EnvInfo; use env_info::EnvInfo;
use error::ExecutionError; use error::ExecutionError;
use evm::{self, Ext, Factory, Finalize}; use evm::{self, Ext, Factory, Finalize, CreateContractAddress};
use externalities::*; use externalities::*;
use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer}; use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer};
use transaction::{Action, SignedTransaction}; use transaction::{Action, SignedTransaction};
@ -34,14 +34,29 @@ pub use types::executed::{Executed, ExecutionResult};
/// Maybe something like here: `https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp` /// Maybe something like here: `https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp`
const STACK_SIZE_PER_DEPTH: usize = 24*1024; const STACK_SIZE_PER_DEPTH: usize = 24*1024;
/// Returns new address created from address and given nonce. /// Returns new address created from address, nonce, and code hash
pub fn contract_address(address: &Address, nonce: &U256) -> Address { pub fn contract_address(address_scheme: CreateContractAddress, sender: &Address, nonce: &U256, code_hash: &H256) -> Address {
use rlp::RlpStream; use rlp::RlpStream;
match address_scheme {
CreateContractAddress::FromSenderAndNonce => {
let mut stream = RlpStream::new_list(2); let mut stream = RlpStream::new_list(2);
stream.append(address); stream.append(sender);
stream.append(nonce); stream.append(nonce);
From::from(stream.out().sha3()) From::from(stream.as_raw().sha3())
},
CreateContractAddress::FromCodeHash => {
let mut buffer = [0xffu8; 20 + 32];
&mut buffer[20..].copy_from_slice(&code_hash[..]);
From::from((&buffer[..]).sha3())
},
CreateContractAddress::FromSenderAndCodeHash => {
let mut buffer = [0u8; 20 + 32];
&mut buffer[..20].copy_from_slice(&sender[..]);
&mut buffer[20..].copy_from_slice(&code_hash[..]);
From::from((&buffer[..]).sha3())
},
}
} }
/// Transaction execution options. /// Transaction execution options.
@ -125,7 +140,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
let sender = t.sender(); let sender = t.sender();
let nonce = self.state.nonce(&sender)?; let nonce = self.state.nonce(&sender)?;
let schedule = self.engine.schedule(self.info); let schedule = self.engine.schedule(self.info.number);
let base_gas_required = U256::from(t.gas_required(&schedule)); let base_gas_required = U256::from(t.gas_required(&schedule));
if t.gas < base_gas_required { if t.gas < base_gas_required {
@ -160,17 +175,20 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
} }
// NOTE: there can be no invalid transactions from this point. // NOTE: there can be no invalid transactions from this point.
if !t.is_unsigned() {
self.state.inc_nonce(&sender)?; self.state.inc_nonce(&sender)?;
}
self.state.sub_balance(&sender, &U256::from(gas_cost))?; self.state.sub_balance(&sender, &U256::from(gas_cost))?;
let mut substate = Substate::new(); let mut substate = Substate::new();
let (gas_left, output) = match t.action { let (gas_left, output) = match t.action {
Action::Create => { Action::Create => {
let new_address = contract_address(&sender, &nonce); let code_hash = t.data.sha3();
let new_address = contract_address(self.engine.create_address_scheme(self.info.number), &sender, &nonce, &code_hash);
let params = ActionParams { let params = ActionParams {
code_address: new_address.clone(), code_address: new_address.clone(),
code_hash: t.data.sha3(), code_hash: code_hash,
address: new_address, address: new_address,
sender: sender.clone(), sender: sender.clone(),
origin: sender.clone(), origin: sender.clone(),
@ -253,7 +271,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
// backup used in case of running out of gas // backup used in case of running out of gas
self.state.checkpoint(); self.state.checkpoint();
let schedule = self.engine.schedule(self.info); let schedule = self.engine.schedule(self.info.number);
// at first, transfer value to destination // at first, transfer value to destination
if let ActionValue::Transfer(val) = params.value { if let ActionValue::Transfer(val) = params.value {
@ -365,8 +383,14 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
params: ActionParams, params: ActionParams,
substate: &mut Substate, substate: &mut Substate,
tracer: &mut T, tracer: &mut T,
vm_tracer: &mut V vm_tracer: &mut V,
) -> evm::Result<U256> where T: Tracer, V: VMTracer { ) -> evm::Result<U256> where T: Tracer, V: VMTracer {
let scheme = self.engine.create_address_scheme(self.info.number);
if scheme != CreateContractAddress::FromSenderAndNonce && self.state.exists_and_has_code(&params.address)? {
return Err(evm::Error::OutOfGas);
}
// backup used in case of running out of gas // backup used in case of running out of gas
self.state.checkpoint(); self.state.checkpoint();
@ -374,7 +398,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
let mut unconfirmed_substate = Substate::new(); let mut unconfirmed_substate = Substate::new();
// create contract and transfer value to it if necessary // create contract and transfer value to it if necessary
let schedule = self.engine.schedule(self.info); let schedule = self.engine.schedule(self.info.number);
let nonce_offset = if schedule.no_empty {1} else {0}.into(); let nonce_offset = if schedule.no_empty {1} else {0}.into();
let prev_bal = self.state.balance(&params.address)?; let prev_bal = self.state.balance(&params.address)?;
if let ActionValue::Transfer(val) = params.value { if let ActionValue::Transfer(val) = params.value {
@ -423,7 +447,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
trace: Vec<FlatTrace>, trace: Vec<FlatTrace>,
vm_trace: Option<VMTrace> vm_trace: Option<VMTrace>
) -> ExecutionResult { ) -> ExecutionResult {
let schedule = self.engine.schedule(self.info); let schedule = self.engine.schedule(self.info.number);
// refunds from SSTORE nonzero -> zero // refunds from SSTORE nonzero -> zero
let sstore_refunds = U256::from(schedule.sstore_refund_gas) * substate.sstore_clears_count; let sstore_refunds = U256::from(schedule.sstore_refund_gas) * substate.sstore_clears_count;
@ -525,7 +549,7 @@ mod tests {
use util::bytes::BytesRef; use util::bytes::BytesRef;
use action_params::{ActionParams, ActionValue}; use action_params::{ActionParams, ActionValue};
use env_info::EnvInfo; use env_info::EnvInfo;
use evm::{Factory, VMType}; use evm::{Factory, VMType, CreateContractAddress};
use error::ExecutionError; use error::ExecutionError;
use state::{Substate, CleanupMode}; use state::{Substate, CleanupMode};
use tests::helpers::*; use tests::helpers::*;
@ -540,22 +564,21 @@ mod tests {
fn test_contract_address() { fn test_contract_address() {
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let expected_address = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); let expected_address = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap();
assert_eq!(expected_address, contract_address(&address, &U256::from(88))); assert_eq!(expected_address, contract_address(CreateContractAddress::FromSenderAndNonce, &address, &U256::from(88), &H256::default()));
} }
// TODO: replace params with transactions! // TODO: replace params with transactions!
evm_test!{test_sender_balance: test_sender_balance_jit, test_sender_balance_int} evm_test!{test_sender_balance: test_sender_balance_jit, test_sender_balance_int}
fn test_sender_balance(factory: Factory) { fn test_sender_balance(factory: Factory) {
let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
params.address = address.clone(); params.address = address.clone();
params.sender = sender.clone(); params.sender = sender.clone();
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(Arc::new("3331600055".from_hex().unwrap())); params.code = Some(Arc::new("3331600055".from_hex().unwrap()));
params.value = ActionValue::Transfer(U256::from(0x7)); params.value = ActionValue::Transfer(U256::from(0x7));
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(0x100u64), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let engine = TestEngine::new(0);
@ -603,7 +626,7 @@ mod tests {
let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap(); let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap();
let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
// TODO: add tests for 'callcreate' // TODO: add tests for 'callcreate'
//let next_address = contract_address(&address, &U256::zero()); //let next_address = contract_address(&address, &U256::zero());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
@ -613,8 +636,7 @@ mod tests {
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(Arc::new(code)); params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(U256::from(100)); params.value = ActionValue::Transfer(U256::from(100));
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let engine = TestEngine::new(0);
@ -660,7 +682,7 @@ mod tests {
let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap(); let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0600055".from_hex().unwrap();
let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
// TODO: add tests for 'callcreate' // TODO: add tests for 'callcreate'
//let next_address = contract_address(&address, &U256::zero()); //let next_address = contract_address(&address, &U256::zero());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
@ -672,8 +694,7 @@ mod tests {
params.code = Some(Arc::new(code)); params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(U256::from(100)); params.value = ActionValue::Transfer(U256::from(100));
params.call_type = CallType::Call; params.call_type = CallType::Call;
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(5); let engine = TestEngine::new(5);
@ -773,7 +794,7 @@ mod tests {
let code = "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(); let code = "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap();
let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
// TODO: add tests for 'callcreate' // TODO: add tests for 'callcreate'
//let next_address = contract_address(&address, &U256::zero()); //let next_address = contract_address(&address, &U256::zero());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
@ -783,8 +804,7 @@ mod tests {
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(Arc::new(code)); params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(100.into()); params.value = ActionValue::Transfer(100.into());
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(5); let engine = TestEngine::new(5);
@ -861,7 +881,7 @@ mod tests {
let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d600360e6f0600055".from_hex().unwrap(); let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d600360e6f0600055".from_hex().unwrap();
let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
// TODO: add tests for 'callcreate' // TODO: add tests for 'callcreate'
//let next_address = contract_address(&address, &U256::zero()); //let next_address = contract_address(&address, &U256::zero());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
@ -871,8 +891,7 @@ mod tests {
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(Arc::new(code)); params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(U256::from(100)); params.value = ActionValue::Transfer(U256::from(100));
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let engine = TestEngine::new(0);
@ -914,8 +933,8 @@ mod tests {
let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0".from_hex().unwrap(); let code = "7c601080600c6000396000f3006000355415600957005b60203560003555600052601d60036017f0".from_hex().unwrap();
let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
let next_address = contract_address(&address, &U256::zero()); let next_address = contract_address(CreateContractAddress::FromSenderAndNonce, &address, &U256::zero(), &H256::default());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
params.address = address.clone(); params.address = address.clone();
params.sender = sender.clone(); params.sender = sender.clone();
@ -923,8 +942,7 @@ mod tests {
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(Arc::new(code)); params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(U256::from(100)); params.value = ActionValue::Transfer(U256::from(100));
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(1024); let engine = TestEngine::new(1024);
@ -981,8 +999,7 @@ mod tests {
params.code = Some(Arc::new(code_a.clone())); params.code = Some(Arc::new(code_a.clone()));
params.value = ActionValue::Transfer(U256::from(100_000)); params.value = ActionValue::Transfer(U256::from(100_000));
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.init_code(&address_a, code_a.clone()).unwrap(); state.init_code(&address_a, code_a.clone()).unwrap();
state.init_code(&address_b, code_b.clone()).unwrap(); state.init_code(&address_b, code_b.clone()).unwrap();
state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100_000), CleanupMode::NoEmpty).unwrap();
@ -1024,13 +1041,12 @@ mod tests {
// 55 - sstore // 55 - sstore
let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap(); let sender = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
let code = "600160005401600055600060006000600060003060e05a03f1600155".from_hex().unwrap(); let code = "600160005401600055600060006000600060003060e05a03f1600155".from_hex().unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
params.address = address.clone(); params.address = address.clone();
params.gas = U256::from(100_000); params.gas = U256::from(100_000);
params.code = Some(Arc::new(code.clone())); params.code = Some(Arc::new(code.clone()));
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.init_code(&address, code).unwrap(); state.init_code(&address, code).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let engine = TestEngine::new(0);
@ -1060,10 +1076,9 @@ mod tests {
nonce: U256::zero() nonce: U256::zero()
}.sign(keypair.secret(), None); }.sign(keypair.secret(), None);
let sender = t.sender(); let sender = t.sender();
let contract = contract_address(&sender, &U256::zero()); let contract = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(18), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
@ -1100,8 +1115,7 @@ mod tests {
}.sign(keypair.secret(), None); }.sign(keypair.secret(), None);
let sender = t.sender(); let sender = t.sender();
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
@ -1133,8 +1147,7 @@ mod tests {
}.sign(keypair.secret(), None); }.sign(keypair.secret(), None);
let sender = t.sender(); let sender = t.sender();
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(17), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_used = U256::from(20_000); info.gas_used = U256::from(20_000);
@ -1168,8 +1181,7 @@ mod tests {
}.sign(keypair.secret(), None); }.sign(keypair.secret(), None);
let sender = t.sender(); let sender = t.sender();
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from(100_017), CleanupMode::NoEmpty).unwrap();
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = U256::from(100_000); info.gas_limit = U256::from(100_000);
@ -1193,7 +1205,7 @@ mod tests {
let code = "6064640fffffffff20600055".from_hex().unwrap(); let code = "6064640fffffffff20600055".from_hex().unwrap();
let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
let address = contract_address(&sender, &U256::zero()); let address = contract_address(CreateContractAddress::FromSenderAndNonce, &sender, &U256::zero(), &H256::default());
// TODO: add tests for 'callcreate' // TODO: add tests for 'callcreate'
//let next_address = contract_address(&address, &U256::zero()); //let next_address = contract_address(&address, &U256::zero());
let mut params = ActionParams::default(); let mut params = ActionParams::default();
@ -1203,8 +1215,7 @@ mod tests {
params.gas = U256::from(0x0186a0); params.gas = U256::from(0x0186a0);
params.code = Some(Arc::new(code)); params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap());
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap(); state.add_balance(&sender, &U256::from_str("152d02c7e14af6800000").unwrap(), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default(); let info = EnvInfo::default();
let engine = TestEngine::new(0); let engine = TestEngine::new(0);

View File

@ -21,8 +21,9 @@ use state::{Backend as StateBackend, State, Substate};
use engines::Engine; use engines::Engine;
use env_info::EnvInfo; use env_info::EnvInfo;
use executive::*; use executive::*;
use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory}; use evm::{self, Schedule, Ext, ContractCreateResult, MessageCallResult, Factory, CreateContractAddress};
use types::executed::CallType; use types::executed::CallType;
use types::transaction::UNSIGNED_SENDER;
use trace::{Tracer, VMTracer}; use trace::{Tracer, VMTracer};
/// Policy for handling output data on `RETURN` opcode. /// Policy for handling output data on `RETURN` opcode.
@ -97,7 +98,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Externalities<'a, T, V, B>
depth: depth, depth: depth,
origin_info: origin_info, origin_info: origin_info,
substate: substate, substate: substate,
schedule: engine.schedule(env_info), schedule: engine.schedule(env_info.number),
output: output, output: output,
tracer: tracer, tracer: tracer,
vm_tracer: vm_tracer, vm_tracer: vm_tracer,
@ -147,10 +148,11 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
} }
} }
fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { fn create(&mut self, gas: &U256, value: &U256, code: &[u8], address_scheme: CreateContractAddress) -> ContractCreateResult {
// create new contract address // create new contract address
let code_hash = code.sha3();
let address = match self.state.nonce(&self.origin_info.address) { let address = match self.state.nonce(&self.origin_info.address) {
Ok(nonce) => contract_address(&self.origin_info.address, &nonce), Ok(nonce) => contract_address(address_scheme, &self.origin_info.address, &nonce, &code_hash),
Err(e) => { Err(e) => {
debug!(target: "ext", "Database corruption encountered: {:?}", e); debug!(target: "ext", "Database corruption encountered: {:?}", e);
return ContractCreateResult::Failed return ContractCreateResult::Failed
@ -167,15 +169,17 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B>
gas_price: self.origin_info.gas_price, gas_price: self.origin_info.gas_price,
value: ActionValue::Transfer(*value), value: ActionValue::Transfer(*value),
code: Some(Arc::new(code.to_vec())), code: Some(Arc::new(code.to_vec())),
code_hash: code.sha3(), code_hash: code_hash,
data: None, data: None,
call_type: CallType::None, call_type: CallType::None,
}; };
if params.sender != UNSIGNED_SENDER {
if let Err(e) = self.state.inc_nonce(&self.origin_info.address) { if let Err(e) = self.state.inc_nonce(&self.origin_info.address) {
debug!(target: "ext", "Database corruption encountered: {:?}", e); debug!(target: "ext", "Database corruption encountered: {:?}", e);
return ContractCreateResult::Failed return ContractCreateResult::Failed
} }
}
let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth); let mut ex = Executive::from_parent(self.state, self.env_info, self.engine, self.vm_factory, self.depth);
// TODO: handle internal error separately // TODO: handle internal error separately
@ -346,7 +350,6 @@ mod tests {
use evm::Ext; use evm::Ext;
use state::{State, Substate}; use state::{State, Substate};
use tests::helpers::*; use tests::helpers::*;
use devtools::GuardedTempResult;
use super::*; use super::*;
use trace::{NoopTracer, NoopVMTracer}; use trace::{NoopTracer, NoopVMTracer};
use types::executed::CallType; use types::executed::CallType;
@ -373,7 +376,7 @@ mod tests {
} }
struct TestSetup { struct TestSetup {
state: GuardedTempResult<State<::state_db::StateDB>>, state: State<::state_db::StateDB>,
engine: Arc<Engine>, engine: Arc<Engine>,
sub_state: Substate, sub_state: Substate,
env_info: EnvInfo env_info: EnvInfo
@ -399,7 +402,7 @@ mod tests {
#[test] #[test]
fn can_be_created() { fn can_be_created() {
let mut setup = TestSetup::new(); let mut setup = TestSetup::new();
let state = setup.state.reference_mut(); let state = &mut setup.state;
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
@ -412,7 +415,7 @@ mod tests {
#[test] #[test]
fn can_return_block_hash_no_env() { fn can_return_block_hash_no_env() {
let mut setup = TestSetup::new(); let mut setup = TestSetup::new();
let state = setup.state.reference_mut(); let state = &mut setup.state;
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
@ -437,7 +440,7 @@ mod tests {
last_hashes.push(test_hash.clone()); last_hashes.push(test_hash.clone());
env_info.last_hashes = Arc::new(last_hashes); env_info.last_hashes = Arc::new(last_hashes);
} }
let state = setup.state.reference_mut(); let state = &mut setup.state;
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
@ -453,7 +456,7 @@ mod tests {
#[should_panic] #[should_panic]
fn can_call_fail_empty() { fn can_call_fail_empty() {
let mut setup = TestSetup::new(); let mut setup = TestSetup::new();
let state = setup.state.reference_mut(); let state = &mut setup.state;
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
@ -481,7 +484,7 @@ mod tests {
let log_topics = vec![H256::from("af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd")]; let log_topics = vec![H256::from("af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd")];
let mut setup = TestSetup::new(); let mut setup = TestSetup::new();
let state = setup.state.reference_mut(); let state = &mut setup.state;
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;
@ -499,7 +502,7 @@ mod tests {
let refund_account = &Address::new(); let refund_account = &Address::new();
let mut setup = TestSetup::new(); let mut setup = TestSetup::new();
let state = setup.state.reference_mut(); let state = &mut setup.state;
let mut tracer = NoopTracer; let mut tracer = NoopTracer;
let mut vm_tracer = NoopVMTracer; let mut vm_tracer = NoopVMTracer;

View File

@ -51,7 +51,7 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec<String> {
ChainEra::_Eip161 => ethereum::new_eip161_test(), ChainEra::_Eip161 => ethereum::new_eip161_test(),
ChainEra::TransitionTest => ethereum::new_transition_test(), ChainEra::TransitionTest => ethereum::new_transition_test(),
}; };
spec.set_genesis_state(state); spec.set_genesis_state(state).expect("Failed to overwrite genesis state");
spec.overwrite_genesis_params(genesis); spec.overwrite_genesis_params(genesis);
assert!(spec.is_state_root_valid()); assert!(spec.is_state_root_valid());
spec spec

View File

@ -21,7 +21,7 @@ use executive::*;
use engines::Engine; use engines::Engine;
use env_info::EnvInfo; use env_info::EnvInfo;
use evm; use evm;
use evm::{Schedule, Ext, Factory, Finalize, VMType, ContractCreateResult, MessageCallResult}; use evm::{Schedule, Ext, Factory, Finalize, VMType, ContractCreateResult, MessageCallResult, CreateContractAddress};
use externalities::*; use externalities::*;
use types::executed::CallType; use types::executed::CallType;
use tests::helpers::*; use tests::helpers::*;
@ -56,7 +56,8 @@ struct TestExt<'a, T: 'a, V: 'a, B: 'a>
{ {
ext: Externalities<'a, T, V, B>, ext: Externalities<'a, T, V, B>,
callcreates: Vec<CallCreate>, callcreates: Vec<CallCreate>,
contract_address: Address nonce: U256,
sender: Address,
} }
impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B> impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B>
@ -76,9 +77,10 @@ impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B>
vm_tracer: &'a mut V, vm_tracer: &'a mut V,
) -> trie::Result<Self> { ) -> trie::Result<Self> {
Ok(TestExt { Ok(TestExt {
contract_address: contract_address(&address, &state.nonce(&address)?), nonce: state.nonce(&address)?,
ext: Externalities::new(state, info, engine, vm_factory, depth, origin_info, substate, output, tracer, vm_tracer), ext: Externalities::new(state, info, engine, vm_factory, depth, origin_info, substate, output, tracer, vm_tracer),
callcreates: vec![] callcreates: vec![],
sender: address,
}) })
} }
} }
@ -114,14 +116,15 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B>
self.ext.blockhash(number) self.ext.blockhash(number)
} }
fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { fn create(&mut self, gas: &U256, value: &U256, code: &[u8], address: CreateContractAddress) -> ContractCreateResult {
self.callcreates.push(CallCreate { self.callcreates.push(CallCreate {
data: code.to_vec(), data: code.to_vec(),
destination: None, destination: None,
gas_limit: *gas, gas_limit: *gas,
value: *value value: *value
}); });
ContractCreateResult::Created(self.contract_address.clone(), *gas) let contract_address = contract_address(address, &self.sender, &self.nonce, &code.sha3());
ContractCreateResult::Created(contract_address, *gas)
} }
fn call(&mut self, fn call(&mut self,
@ -215,8 +218,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
} }
let out_of_gas = vm.out_of_gas(); let out_of_gas = vm.out_of_gas();
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.populate_from(From::from(vm.pre_state.clone())); state.populate_from(From::from(vm.pre_state.clone()));
let info = From::from(vm.env); let info = From::from(vm.env);
let engine = TestEngine::new(1); let engine = TestEngine::new(1);

View File

@ -21,6 +21,8 @@ use ethereum;
use spec::Spec; use spec::Spec;
use ethjson; use ethjson;
use ethjson::state::test::ForkSpec; use ethjson::state::test::ForkSpec;
use types::transaction::SignedTransaction;
use env_info::EnvInfo;
lazy_static! { lazy_static! {
pub static ref FRONTIER: Spec = ethereum::new_frontier_test(); pub static ref FRONTIER: Spec = ethereum::new_frontier_test();
@ -37,7 +39,7 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
for (name, test) in tests.into_iter() { for (name, test) in tests.into_iter() {
{ {
let multitransaction = test.transaction; let multitransaction = test.transaction;
let env = test.env.into(); let env: EnvInfo = test.env.into();
let pre: PodState = test.pre_state.into(); let pre: PodState = test.pre_state.into();
for (spec, states) in test.post_states { for (spec, states) in test.post_states {
@ -54,12 +56,15 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
let info = format!(" - {} | {:?} ({}/{}) ...", name, spec, i + 1, total); let info = format!(" - {} | {:?} ({}/{}) ...", name, spec, i + 1, total);
let post_root: H256 = state.hash.into(); let post_root: H256 = state.hash.into();
let transaction = multitransaction.select(&state.indexes).into(); let transaction: SignedTransaction = multitransaction.select(&state.indexes).into();
let mut state = get_temp_state();
let mut state = get_temp_mem_state();
state.populate_from(pre.clone()); state.populate_from(pre.clone());
if transaction.verify_basic(true, None, env.number >= engine.params().eip86_transition).is_ok() {
state.commit().expect(&format!("State test {} failed due to internal error.", name)); state.commit().expect(&format!("State test {} failed due to internal error.", name));
let _res = state.apply(&env, &**engine, &transaction, false); let _res = state.apply(&env, &**engine, &transaction, false);
} else {
let _rest = state.commit();
}
if state.root() != &post_root { if state.root() != &post_root {
println!("{} !!! State mismatch (got: {}, expect: {}", info, state.root(), post_root); println!("{} !!! State mismatch (got: {}, expect: {}", info, state.root(), post_root);
flushln!("{} fail", info); flushln!("{} fail", info);
@ -73,7 +78,9 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
} }
println!("!!! {:?} tests from failed.", failed.len()); if !failed.is_empty() {
println!("!!! {:?} tests failed.", failed.len());
}
failed failed
} }

View File

@ -18,35 +18,37 @@ use super::test_common::*;
use evm; use evm;
use ethjson; use ethjson;
use rlp::UntrustedRlp; use rlp::UntrustedRlp;
use transaction::{Action, UnverifiedTransaction}; use transaction::{Action, UnverifiedTransaction, SignedTransaction};
use ethstore::ethkey::public_to_address;
fn do_json_test(json_data: &[u8]) -> Vec<String> { fn do_json_test(json_data: &[u8]) -> Vec<String> {
let tests = ethjson::transaction::Test::load(json_data).unwrap(); let tests = ethjson::transaction::Test::load(json_data).unwrap();
let mut failed = Vec::new(); let mut failed = Vec::new();
let old_schedule = evm::Schedule::new_frontier(); let frontier_schedule = evm::Schedule::new_frontier();
let new_schedule = evm::Schedule::new_homestead(); let homestead_schedule = evm::Schedule::new_homestead();
let metropolis_schedule = evm::Schedule::new_metropolis();
for (name, test) in tests.into_iter() { for (name, test) in tests.into_iter() {
let mut fail_unless = |cond: bool, title: &str| if !cond { failed.push(name.clone()); println!("Transaction failed: {:?}: {:?}", name, title); }; let mut fail_unless = |cond: bool, title: &str| if !cond { failed.push(name.clone()); println!("Transaction failed: {:?}: {:?}", name, title); };
let number: Option<u64> = test.block_number.map(Into::into); let number: Option<u64> = test.block_number.map(Into::into);
let schedule = match number { let schedule = match number {
None => &old_schedule, None => &frontier_schedule,
Some(x) if x < 1_150_000 => &old_schedule, Some(x) if x < 1_150_000 => &frontier_schedule,
Some(_) => &new_schedule Some(x) if x < 3_000_000 => &homestead_schedule,
Some(_) => &metropolis_schedule
}; };
let allow_network_id_of_one = number.map_or(false, |n| n >= 2_675_000); let allow_network_id_of_one = number.map_or(false, |n| n >= 2_675_000);
let allow_unsigned = number.map_or(false, |n| n >= 3_000_000);
let rlp: Vec<u8> = test.rlp.into(); let rlp: Vec<u8> = test.rlp.into();
let res = UntrustedRlp::new(&rlp) let res = UntrustedRlp::new(&rlp)
.as_val() .as_val()
.map_err(From::from) .map_err(From::from)
.and_then(|t: UnverifiedTransaction| t.validate(schedule, schedule.have_delegate_call, allow_network_id_of_one)); .and_then(|t: UnverifiedTransaction| t.validate(schedule, schedule.have_delegate_call, allow_network_id_of_one, allow_unsigned));
fail_unless(test.transaction.is_none() == res.is_err(), "Validity different"); fail_unless(test.transaction.is_none() == res.is_err(), "Validity different");
if let (Some(tx), Some(sender)) = (test.transaction, test.sender) { if let (Some(tx), Some(sender)) = (test.transaction, test.sender) {
let t = res.unwrap(); let t = res.unwrap();
fail_unless(public_to_address(&t.recover_public().unwrap()) == sender.into(), "sender mismatch"); fail_unless(SignedTransaction::new(t.clone()).unwrap().sender() == sender.into(), "sender mismatch");
let is_acceptable_network_id = match t.network_id() { let is_acceptable_network_id = match t.network_id() {
None => true, None => true,
Some(1) if allow_network_id_of_one => true, Some(1) if allow_network_id_of_one => true,
@ -84,3 +86,7 @@ declare_test!{TransactionTests_Homestead_ttTransactionTestEip155VitaliksTests, "
declare_test!{TransactionTests_EIP155_ttTransactionTest, "TransactionTests/EIP155/ttTransactionTest"} declare_test!{TransactionTests_EIP155_ttTransactionTest, "TransactionTests/EIP155/ttTransactionTest"}
declare_test!{TransactionTests_EIP155_ttTransactionTestEip155VitaliksTests, "TransactionTests/EIP155/ttTransactionTestEip155VitaliksTests"} declare_test!{TransactionTests_EIP155_ttTransactionTestEip155VitaliksTests, "TransactionTests/EIP155/ttTransactionTestEip155VitaliksTests"}
declare_test!{TransactionTests_EIP155_ttTransactionTestVRule, "TransactionTests/EIP155/ttTransactionTestVRule"} declare_test!{TransactionTests_EIP155_ttTransactionTestVRule, "TransactionTests/EIP155/ttTransactionTestVRule"}
declare_test!{TransactionTests_Metropolis_ttMetropolisTest, "TransactionTests/Metropolis/ttMetropolisTest"}
declare_test!{TransactionTests_Metropolis_ttTransactionTest, "TransactionTests/Metropolis/ttTransactionTest"}
declare_test!{TransactionTests_Metropolis_ttTransactionTestZeroSig, "TransactionTests/Metropolis/ttTransactionTestZeroSig"}

View File

@ -170,3 +170,4 @@ mod json_tests;
pub use types::*; pub use types::*;
pub use executive::contract_address; pub use executive::contract_address;
pub use evm::CreateContractAddress;

View File

@ -238,7 +238,7 @@ impl Migration for OverlayRecentV7 {
} }
let mut count = 0; let mut count = 0;
for (key, value) in source.iter(None) { for (key, value) in source.iter(None).into_iter().flat_map(|inner| inner) {
count += 1; count += 1;
if count == 100_000 { if count == 100_000 {
count = 0; count = 0;

View File

@ -102,7 +102,7 @@ impl Migration for ToV10 {
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> { fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, col); let mut batch = Batch::new(config, col);
for (key, value) in source.iter(col) { for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) {
self.progress.tick(); self.progress.tick();
batch.insert(key.to_vec(), value.to_vec(), dest)?; batch.insert(key.to_vec(), value.to_vec(), dest)?;
} }

View File

@ -59,7 +59,7 @@ impl Migration for ToV9 {
fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> { fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
let mut batch = Batch::new(config, self.column); let mut batch = Batch::new(config, self.column);
for (key, value) in source.iter(col) { for (key, value) in source.iter(col).into_iter().flat_map(|inner| inner) {
self.progress.tick(); self.progress.tick();
match self.extract { match self.extract {
Extract::Header => { Extract::Header => {

View File

@ -163,7 +163,7 @@ impl GasPriceCalibrator {
let wei_per_usd: f32 = 1.0e18 / usd_per_eth; let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
let gas_per_tx: f32 = 21000.0; let gas_per_tx: f32 = 21000.0;
let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx; let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
info!(target: "miner", "Updated conversion rate to Ξ1 = {} ({} wei/gas)", Colour::White.bold().paint(format!("US${}", usd_per_eth)), Colour::Yellow.bold().paint(format!("{}", wei_per_gas))); info!(target: "miner", "Updated conversion rate to Ξ1 = {} ({} wei/gas)", Colour::White.bold().paint(format!("US${:.2}", usd_per_eth)), Colour::Yellow.bold().paint(format!("{}", wei_per_gas)));
set_price(U256::from(wei_per_gas as u64)); set_price(U256::from(wei_per_gas as u64));
}); });
@ -1048,7 +1048,7 @@ impl MinerService for Miner {
Action::Call(_) => None, Action::Call(_) => None,
Action::Create => { Action::Create => {
let sender = tx.sender(); let sender = tx.sender();
Some(contract_address(&sender, &tx.nonce)) Some(contract_address(self.engine.create_address_scheme(pending.header().number()), &sender, &tx.nonce, &tx.data.sha3()))
} }
}, },
logs: receipt.logs.clone(), logs: receipt.logs.clone(),
@ -1327,6 +1327,10 @@ mod tests {
} }
fn transaction() -> SignedTransaction { fn transaction() -> SignedTransaction {
transaction_with_network_id(2)
}
fn transaction_with_network_id(id: u64) -> SignedTransaction {
let keypair = Random.generate().unwrap(); let keypair = Random.generate().unwrap();
Transaction { Transaction {
action: Action::Create, action: Action::Create,
@ -1335,7 +1339,7 @@ mod tests {
gas: U256::from(100_000), gas: U256::from(100_000),
gas_price: U256::zero(), gas_price: U256::zero(),
nonce: U256::zero(), nonce: U256::zero(),
}.sign(keypair.secret(), None) }.sign(keypair.secret(), Some(id))
} }
#[test] #[test]
@ -1411,21 +1415,21 @@ mod tests {
#[test] #[test]
fn internal_seals_without_work() { fn internal_seals_without_work() {
let miner = Miner::with_spec(&Spec::new_instant()); let spec = Spec::new_instant();
let miner = Miner::with_spec(&spec);
let c = generate_dummy_client(2); let client = generate_dummy_client(2);
let client = c.reference().as_ref();
assert_eq!(miner.import_external_transactions(client, vec![transaction().into()]).pop().unwrap().unwrap(), TransactionImportResult::Current); assert_eq!(miner.import_external_transactions(&*client, vec![transaction_with_network_id(spec.network_id()).into()]).pop().unwrap().unwrap(), TransactionImportResult::Current);
miner.update_sealing(client); miner.update_sealing(&*client);
client.flush_queue(); client.flush_queue();
assert!(miner.pending_block().is_none()); assert!(miner.pending_block().is_none());
assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber);
assert_eq!(miner.import_own_transaction(client, PendingTransaction::new(transaction().into(), None)).unwrap(), TransactionImportResult::Current); assert_eq!(miner.import_own_transaction(&*client, PendingTransaction::new(transaction_with_network_id(spec.network_id()).into(), None)).unwrap(), TransactionImportResult::Current);
miner.update_sealing(client); miner.update_sealing(&*client);
client.flush_queue(); client.flush_queue();
assert!(miner.pending_block().is_none()); assert!(miner.pending_block().is_none());
assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber); assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber);

View File

@ -16,7 +16,6 @@
use util::*; use util::*;
use state::Account; use state::Account;
use account_db::AccountDBMut;
use ethjson; use ethjson;
use types::account_diff::*; use types::account_diff::*;
use rlp::{self, RlpStream}; use rlp::{self, RlpStream};
@ -64,7 +63,7 @@ impl PodAccount {
} }
/// Place additional data into given hash DB. /// Place additional data into given hash DB.
pub fn insert_additional(&self, db: &mut AccountDBMut, factory: &TrieFactory) { pub fn insert_additional(&self, db: &mut HashDB, factory: &TrieFactory) {
match self.code { match self.code {
Some(ref c) if !c.is_empty() => { db.insert(c); } Some(ref c) if !c.is_empty() => { db.insert(c); }
_ => {} _ => {}

View File

@ -0,0 +1,352 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Secondary chunk creation and restoration, implementations for different consensus
//! engines.
use std::collections::VecDeque;
use std::io;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use blockchain::{BlockChain, BlockProvider};
use engines::Engine;
use snapshot::{Error, ManifestData};
use snapshot::block::AbridgedBlock;
use util::{Bytes, H256};
use util::kvdb::KeyValueDB;
use rand::OsRng;
use rlp::{RlpStream, UntrustedRlp};
/// A sink for produced chunks.
pub type ChunkSink<'a> = FnMut(&[u8]) -> io::Result<()> + 'a;
/// Components necessary for snapshot creation and restoration.
///
/// Implemented per consensus engine: the engine decides what secondary
/// (non-state) data goes into a snapshot and how it is restored.
pub trait SnapshotComponents: Send {
	/// Create secondary snapshot chunks; these corroborate the state data
	/// in the state chunks.
	///
	/// Chunks shouldn't exceed the given preferred size, and should be fed
	/// uncompressed into the sink.
	///
	/// This will vary by consensus engine, so it's exposed as a trait.
	fn chunk_all(
		&mut self,
		chain: &BlockChain,
		block_at: H256,
		chunk_sink: &mut ChunkSink,
		preferred_size: usize,
	) -> Result<(), Error>;

	/// Create a rebuilder, which will have chunks fed into it in arbitrary
	/// order and then be finalized.
	///
	/// The manifest, a database, and fresh `BlockChain` are supplied.
	// TODO: supply anything for state?
	fn rebuilder(
		&self,
		chain: BlockChain,
		db: Arc<KeyValueDB>,
		manifest: &ManifestData,
	) -> Result<Box<Rebuilder>, ::error::Error>;
}
/// Restore from secondary snapshot chunks.
///
/// Chunks may arrive in any order; `finalize` is invoked once, after every
/// chunk has been fed, to stitch the restored pieces together.
pub trait Rebuilder: Send {
	/// Feed a chunk, potentially out of order.
	///
	/// Check `abort_flag` periodically while doing heavy work. If set to `false`, should bail with
	/// `Error::RestorationAborted`.
	fn feed(
		&mut self,
		chunk: &[u8],
		engine: &Engine,
		abort_flag: &AtomicBool,
	) -> Result<(), ::error::Error>;

	/// Finalize the restoration. Will be done after all chunks have been
	/// fed successfully.
	///
	/// This will apply the necessary "glue" between chunks.
	fn finalize(&mut self) -> Result<(), Error>;
}
/// Snapshot creation and restoration for PoW chains.
/// This includes blocks from the head of the chain as a
/// loose assurance that the chain is valid.
///
/// The field is the number of blocks from the head of the chain
/// to include in the snapshot. The same value bounds both creation
/// (how far back `chunk_all` walks) and restoration (how many blocks
/// the rebuilder will accept before erroring).
#[derive(Clone, Copy, PartialEq)]
pub struct PowSnapshot(pub u64);
impl SnapshotComponents for PowSnapshot {
	/// Chunk the most recent `self.0` blocks of the chain, writing each
	/// uncompressed chunk into `chunk_sink`.
	fn chunk_all(
		&mut self,
		chain: &BlockChain,
		block_at: H256,
		chunk_sink: &mut ChunkSink,
		preferred_size: usize,
	) -> Result<(), Error> {
		// Delegate to a transient worker which walks backwards from
		// `block_at`, flushing chunks into the sink as it goes.
		let mut worker = PowWorker {
			chain: chain,
			rlps: VecDeque::new(),
			current_hash: block_at,
			writer: chunk_sink,
			preferred_size: preferred_size,
		};
		worker.chunk_all(self.0)
	}

	/// Build a `PowRebuilder` bounded by the same block count used
	/// at snapshot-creation time.
	fn rebuilder(
		&self,
		chain: BlockChain,
		db: Arc<KeyValueDB>,
		manifest: &ManifestData,
	) -> Result<Box<Rebuilder>, ::error::Error> {
		let rebuilder = PowRebuilder::new(chain, db, manifest, self.0)?;
		Ok(Box::new(rebuilder) as Box<Rebuilder>)
	}
}
/// Used to build block chunks.
///
/// Walks backwards from a starting block, buffering abridged-block/receipt
/// pairs and flushing a chunk whenever the preferred size would be exceeded.
struct PowWorker<'a> {
	chain: &'a BlockChain,
	// block, receipt rlp pairs.
	rlps: VecDeque<Bytes>,
	// hash of the next block to visit (moves towards genesis).
	current_hash: H256,
	// destination for finished, uncompressed chunks.
	writer: &'a mut ChunkSink<'a>,
	// soft cap on the uncompressed size of a single chunk.
	preferred_size: usize,
}
impl<'a> PowWorker<'a> {
// Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash.
// Loops until we reach the first desired block, and writes out the remainder.
fn chunk_all(&mut self, snapshot_blocks: u64) -> Result<(), Error> {
let mut loaded_size = 0;
let mut last = self.current_hash;
let genesis_hash = self.chain.genesis_hash();
for _ in 0..snapshot_blocks {
if self.current_hash == genesis_hash { break }
let (block, receipts) = self.chain.block(&self.current_hash)
.and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r)))
.ok_or(Error::BlockNotFound(self.current_hash))?;
let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner();
let pair = {
let mut pair_stream = RlpStream::new_list(2);
pair_stream.append_raw(&abridged_rlp, 1).append(&receipts);
pair_stream.out()
};
let new_loaded_size = loaded_size + pair.len();
// cut off the chunk if too large.
if new_loaded_size > self.preferred_size && !self.rlps.is_empty() {
self.write_chunk(last)?;
loaded_size = pair.len();
} else {
loaded_size = new_loaded_size;
}
self.rlps.push_front(pair);
last = self.current_hash;
self.current_hash = block.header_view().parent_hash();
}
if loaded_size != 0 {
self.write_chunk(last)?;
}
Ok(())
}
// write out the data in the buffers to a chunk on disk
//
// we preface each chunk with the parent of the first block's details,
// obtained from the details of the last block written.
fn write_chunk(&mut self, last: H256) -> Result<(), Error> {
trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len());
let (last_header, last_details) = self.chain.block_header(&last)
.and_then(|n| self.chain.block_details(&last).map(|d| (n, d)))
.ok_or(Error::BlockNotFound(last))?;
let parent_number = last_header.number() - 1;
let parent_hash = last_header.parent_hash();
let parent_total_difficulty = last_details.total_difficulty - *last_header.difficulty();
trace!(target: "snapshot", "parent last written block: {}", parent_hash);
let num_entries = self.rlps.len();
let mut rlp_stream = RlpStream::new_list(3 + num_entries);
rlp_stream.append(&parent_number).append(parent_hash).append(&parent_total_difficulty);
for pair in self.rlps.drain(..) {
rlp_stream.append_raw(&pair, 1);
}
let raw_data = rlp_stream.out();
(self.writer)(&raw_data)?;
Ok(())
}
}
/// Rebuilder for proof-of-work chains.
///
/// Does basic verification for all blocks, but `PoW` verification for some.
/// Blocks must be fed in-order.
///
/// The first block in every chunk is disconnected from the last block in the
/// chunk before it, as chunks may be submitted out-of-order.
///
/// After all chunks have been submitted, we "glue" the chunks together.
pub struct PowRebuilder {
	chain: BlockChain,
	db: Arc<KeyValueDB>,
	// randomness source for sampled PoW verification.
	rng: OsRng,
	// (number, hash) of chunk-leading blocks whose parent wasn't present
	// at insertion time; reconnected in `finalize`.
	disconnected: Vec<(u64, H256)>,
	// expected chain tip, taken from the manifest.
	best_number: u64,
	best_hash: H256,
	best_root: H256,
	// how many blocks have been fed so far.
	fed_blocks: u64,
	// upper bound on the number of blocks this snapshot may contain.
	snapshot_blocks: u64,
}
impl PowRebuilder {
	/// Create a new PowRebuilder.
	///
	/// Takes the target chain and database, the snapshot manifest (which
	/// pins the expected best block and state root), and the maximum
	/// number of blocks the snapshot may contain.
	fn new(chain: BlockChain, db: Arc<KeyValueDB>, manifest: &ManifestData, snapshot_blocks: u64) -> Result<Self, ::error::Error> {
		// acquiring the OS RNG can fail; propagate rather than panic.
		let rng = OsRng::new()?;

		Ok(PowRebuilder {
			chain: chain,
			db: db,
			rng: rng,
			disconnected: Vec::new(),
			best_number: manifest.block_number,
			best_hash: manifest.block_hash,
			best_root: manifest.state_root,
			fed_blocks: 0,
			snapshot_blocks: snapshot_blocks,
		})
	}
}
impl Rebuilder for PowRebuilder {
/// Feed the rebuilder an uncompressed block chunk.
/// Returns the number of blocks fed or any errors.
fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<(), ::error::Error> {
use basic_types::Seal::With;
use views::BlockView;
use snapshot::verify_old_block;
use util::U256;
use util::triehash::ordered_trie_root;
let rlp = UntrustedRlp::new(chunk);
let item_count = rlp.item_count()?;
let num_blocks = (item_count - 3) as u64;
trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3);
if self.fed_blocks + num_blocks > self.snapshot_blocks {
return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks).into())
}
// todo: assert here that these values are consistent with chunks being in order.
let mut cur_number = rlp.val_at::<u64>(0)? + 1;
let mut parent_hash = rlp.val_at::<H256>(1)?;
let parent_total_difficulty = rlp.val_at::<U256>(2)?;
for idx in 3..item_count {
if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }
let pair = rlp.at(idx)?;
let abridged_rlp = pair.at(0)?.as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?;
let receipts_root = ordered_trie_root(
pair.at(1)?.iter().map(|r| r.as_raw().to_owned())
);
let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
let block_bytes = block.rlp_bytes(With);
let is_best = cur_number == self.best_number;
if is_best {
if block.header.hash() != self.best_hash {
return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into())
}
if block.header.state_root() != &self.best_root {
return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into())
}
}
verify_old_block(
&mut self.rng,
&block.header,
engine,
&self.chain,
Some(&block_bytes),
is_best
)?;
let mut batch = self.db.transaction();
// special-case the first block in each chunk.
if idx == 3 {
if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) {
self.disconnected.push((cur_number, block.header.hash()));
}
} else {
self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false);
}
self.db.write_buffered(batch);
self.chain.commit();
parent_hash = BlockView::new(&block_bytes).hash();
cur_number += 1;
}
self.fed_blocks += num_blocks;
Ok(())
}
/// Glue together any disconnected chunks and check that the chain is complete.
fn finalize(&mut self) -> Result<(), Error> {
let mut batch = self.db.transaction();
for (first_num, first_hash) in self.disconnected.drain(..) {
let parent_num = first_num - 1;
// check if the parent is even in the chain.
// since we don't restore every single block in the chain,
// the first block of the first chunks has nothing to connect to.
if let Some(parent_hash) = self.chain.block_hash(parent_num) {
// if so, add the child to it.
self.chain.add_child(&mut batch, parent_hash, first_hash);
}
}
self.db.write_buffered(batch);
Ok(())
}
}

View File

@ -57,6 +57,8 @@ pub enum Error {
VersionNotSupported(u64), VersionNotSupported(u64),
/// Max chunk size is to small to fit basic account data. /// Max chunk size is to small to fit basic account data.
ChunkTooSmall, ChunkTooSmall,
/// Snapshots not supported by the consensus engine.
SnapshotsUnsupported,
} }
impl fmt::Display for Error { impl fmt::Display for Error {
@ -79,6 +81,7 @@ impl fmt::Display for Error {
Error::Trie(ref err) => err.fmt(f), Error::Trie(ref err) => err.fmt(f),
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver), Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver),
Error::ChunkTooSmall => write!(f, "Chunk size is too small."), Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
} }
} }
} }

View File

@ -17,9 +17,9 @@
//! Snapshot creation, restoration, and network service. //! Snapshot creation, restoration, and network service.
//! //!
//! Documentation of the format can be found at //! Documentation of the format can be found at
//! https://github.com/paritytech/parity/wiki/%22PV64%22-Snapshot-Format //! https://github.com/paritytech/parity/wiki/Warp-Sync-Snapshot-Format
use std::collections::{HashMap, HashSet, VecDeque}; use std::collections::{HashMap, HashSet};
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@ -28,7 +28,6 @@ use blockchain::{BlockChain, BlockProvider};
use engines::Engine; use engines::Engine;
use header::Header; use header::Header;
use ids::BlockId; use ids::BlockId;
use views::BlockView;
use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256, Uint}; use util::{Bytes, Hashable, HashDB, DBValue, snappy, U256, Uint};
use util::Mutex; use util::Mutex;
@ -40,7 +39,6 @@ use util::sha3::SHA3_NULL_RLP;
use rlp::{RlpStream, UntrustedRlp}; use rlp::{RlpStream, UntrustedRlp};
use bloom_journal::Bloom; use bloom_journal::Bloom;
use self::block::AbridgedBlock;
use self::io::SnapshotWriter; use self::io::SnapshotWriter;
use super::state_db::StateDB; use super::state_db::StateDB;
@ -51,6 +49,7 @@ use rand::{Rng, OsRng};
pub use self::error::Error; pub use self::error::Error;
pub use self::consensus::*;
pub use self::service::{Service, DatabaseRestore}; pub use self::service::{Service, DatabaseRestore};
pub use self::traits::SnapshotService; pub use self::traits::SnapshotService;
pub use self::watcher::Watcher; pub use self::watcher::Watcher;
@ -63,6 +62,7 @@ pub mod service;
mod account; mod account;
mod block; mod block;
mod consensus;
mod error; mod error;
mod watcher; mod watcher;
@ -83,9 +83,6 @@ mod traits {
// Try to have chunks be around 4MB (before compression) // Try to have chunks be around 4MB (before compression)
const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024; const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
// How many blocks to include in a snapshot, starting from the head of the chain.
const SNAPSHOT_BLOCKS: u64 = 30000;
/// A progress indicator for snapshots. /// A progress indicator for snapshots.
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct Progress { pub struct Progress {
@ -122,6 +119,7 @@ impl Progress {
} }
/// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer. /// Take a snapshot using the given blockchain, starting block hash, and database, writing into the given writer.
pub fn take_snapshot<W: SnapshotWriter + Send>( pub fn take_snapshot<W: SnapshotWriter + Send>(
engine: &Engine,
chain: &BlockChain, chain: &BlockChain,
block_at: H256, block_at: H256,
state_db: &HashDB, state_db: &HashDB,
@ -136,9 +134,11 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
info!("Taking snapshot starting at block {}", number); info!("Taking snapshot starting at block {}", number);
let writer = Mutex::new(writer); let writer = Mutex::new(writer);
let chunker = engine.snapshot_components().ok_or(Error::SnapshotsUnsupported)?;
let (state_hashes, block_hashes) = scope(|scope| { let (state_hashes, block_hashes) = scope(|scope| {
let block_guard = scope.spawn(|| chunk_blocks(chain, block_at, &writer, p)); let writer = &writer;
let state_res = chunk_state(state_db, state_root, &writer, p); let block_guard = scope.spawn(move || chunk_secondary(chunker, chain, block_at, writer, p));
let state_res = chunk_state(state_db, state_root, writer, p);
state_res.and_then(|state_hashes| { state_res.and_then(|state_hashes| {
block_guard.join().map(|block_hashes| (state_hashes, block_hashes)) block_guard.join().map(|block_hashes| (state_hashes, block_hashes))
@ -163,128 +163,41 @@ pub fn take_snapshot<W: SnapshotWriter + Send>(
Ok(()) Ok(())
} }
/// Used to build block chunks. /// Create and write out all secondary chunks to disk, returning a vector of all
struct BlockChunker<'a> { /// the hashes of secondary chunks created.
chain: &'a BlockChain,
// block, receipt rlp pairs.
rlps: VecDeque<Bytes>,
current_hash: H256,
hashes: Vec<H256>,
snappy_buffer: Vec<u8>,
writer: &'a Mutex<SnapshotWriter + 'a>,
progress: &'a Progress,
}
impl<'a> BlockChunker<'a> {
// Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash.
// Loops until we reach the first desired block, and writes out the remainder.
fn chunk_all(&mut self) -> Result<(), Error> {
let mut loaded_size = 0;
let mut last = self.current_hash;
let genesis_hash = self.chain.genesis_hash();
for _ in 0..SNAPSHOT_BLOCKS {
if self.current_hash == genesis_hash { break }
let (block, receipts) = self.chain.block(&self.current_hash)
.and_then(|b| self.chain.block_receipts(&self.current_hash).map(|r| (b, r)))
.ok_or(Error::BlockNotFound(self.current_hash))?;
let abridged_rlp = AbridgedBlock::from_block_view(&block.view()).into_inner();
let pair = {
let mut pair_stream = RlpStream::new_list(2);
pair_stream.append_raw(&abridged_rlp, 1).append(&receipts);
pair_stream.out()
};
let new_loaded_size = loaded_size + pair.len();
// cut off the chunk if too large.
if new_loaded_size > PREFERRED_CHUNK_SIZE && !self.rlps.is_empty() {
self.write_chunk(last)?;
loaded_size = pair.len();
} else {
loaded_size = new_loaded_size;
}
self.rlps.push_front(pair);
last = self.current_hash;
self.current_hash = block.header_view().parent_hash();
}
if loaded_size != 0 {
self.write_chunk(last)?;
}
Ok(())
}
// write out the data in the buffers to a chunk on disk
//
// we preface each chunk with the parent of the first block's details,
// obtained from the details of the last block written.
fn write_chunk(&mut self, last: H256) -> Result<(), Error> {
trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len());
let (last_header, last_details) = self.chain.block_header(&last)
.and_then(|n| self.chain.block_details(&last).map(|d| (n, d)))
.ok_or(Error::BlockNotFound(last))?;
let parent_number = last_header.number() - 1;
let parent_hash = last_header.parent_hash();
let parent_total_difficulty = last_details.total_difficulty - *last_header.difficulty();
trace!(target: "snapshot", "parent last written block: {}", parent_hash);
let num_entries = self.rlps.len();
let mut rlp_stream = RlpStream::new_list(3 + num_entries);
rlp_stream.append(&parent_number).append(parent_hash).append(&parent_total_difficulty);
for pair in self.rlps.drain(..) {
rlp_stream.append_raw(&pair, 1);
}
let raw_data = rlp_stream.out();
let size = snappy::compress_into(&raw_data, &mut self.snappy_buffer);
let compressed = &self.snappy_buffer[..size];
let hash = compressed.sha3();
self.writer.lock().write_block_chunk(hash, compressed)?;
trace!(target: "snapshot", "wrote block chunk. hash: {}, size: {}, uncompressed size: {}", hash.hex(), size, raw_data.len());
self.progress.size.fetch_add(size, Ordering::SeqCst);
self.progress.blocks.fetch_add(num_entries, Ordering::SeqCst);
self.hashes.push(hash);
Ok(())
}
}
/// Create and write out all block chunks to disk, returning a vector of all
/// the hashes of block chunks created.
/// ///
/// The path parameter is the directory to store the block chunks in. /// Secondary chunks are engine-specific, but they intend to corroborate the state data
/// This function assumes the directory exists already. /// in the state chunks.
/// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis. /// Returns a list of chunk hashes, with the first having the blocks furthest from the genesis.
pub fn chunk_blocks<'a>(chain: &'a BlockChain, start_hash: H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> { pub fn chunk_secondary<'a>(mut chunker: Box<SnapshotComponents>, chain: &'a BlockChain, start_hash: H256, writer: &Mutex<SnapshotWriter + 'a>, progress: &'a Progress) -> Result<Vec<H256>, Error> {
let mut chunker = BlockChunker { let mut chunk_hashes = Vec::new();
chain: chain, let mut snappy_buffer = vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)];
rlps: VecDeque::new(),
current_hash: start_hash, {
hashes: Vec::new(), let mut chunk_sink = |raw_data: &[u8]| {
snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)], let compressed_size = snappy::compress_into(raw_data, &mut snappy_buffer);
writer: writer, let compressed = &snappy_buffer[..compressed_size];
progress: progress, let hash = compressed.sha3();
let size = compressed.len();
writer.lock().write_block_chunk(hash, compressed)?;
trace!(target: "snapshot", "wrote secondary chunk. hash: {}, size: {}, uncompressed size: {}",
hash.hex(), size, raw_data.len());
progress.size.fetch_add(size, Ordering::SeqCst);
chunk_hashes.push(hash);
Ok(())
}; };
chunker.chunk_all()?; chunker.chunk_all(
chain,
start_hash,
&mut chunk_sink,
PREFERRED_CHUNK_SIZE,
)?;
}
Ok(chunker.hashes) Ok(chunk_hashes)
} }
/// State trie chunker. /// State trie chunker.
@ -564,158 +477,15 @@ const POW_VERIFY_RATE: f32 = 0.02;
/// the fullest verification possible. If not, it will take a random sample to determine whether it will /// the fullest verification possible. If not, it will take a random sample to determine whether it will
/// do heavy or light verification. /// do heavy or light verification.
pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &Engine, chain: &BlockChain, body: Option<&[u8]>, always: bool) -> Result<(), ::error::Error> { pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &Engine, chain: &BlockChain, body: Option<&[u8]>, always: bool) -> Result<(), ::error::Error> {
engine.verify_block_basic(header, body)?;
if always || rng.gen::<f32>() <= POW_VERIFY_RATE { if always || rng.gen::<f32>() <= POW_VERIFY_RATE {
engine.verify_block_unordered(header, body)?;
match chain.block_header(header.parent_hash()) { match chain.block_header(header.parent_hash()) {
Some(parent) => engine.verify_block_family(header, &parent, body), Some(parent) => engine.verify_block_family(header, &parent, body),
None => engine.verify_block_seal(header), None => Ok(()),
} }
} else { } else {
engine.verify_block_basic(header, body)
}
}
/// Rebuilds the blockchain from chunks.
///
/// Does basic verification for all blocks, but `PoW` verification for some.
/// Blocks must be fed in-order.
///
/// The first block in every chunk is disconnected from the last block in the
/// chunk before it, as chunks may be submitted out-of-order.
///
/// After all chunks have been submitted, we "glue" the chunks together.
pub struct BlockRebuilder {
chain: BlockChain,
db: Arc<Database>,
rng: OsRng,
disconnected: Vec<(u64, H256)>,
best_number: u64,
best_hash: H256,
best_root: H256,
fed_blocks: u64,
}
impl BlockRebuilder {
/// Create a new BlockRebuilder.
pub fn new(chain: BlockChain, db: Arc<Database>, manifest: &ManifestData) -> Result<Self, ::error::Error> {
Ok(BlockRebuilder {
chain: chain,
db: db,
rng: OsRng::new()?,
disconnected: Vec::new(),
best_number: manifest.block_number,
best_hash: manifest.block_hash,
best_root: manifest.state_root,
fed_blocks: 0,
})
}
/// Feed the rebuilder an uncompressed block chunk.
/// Returns the number of blocks fed or any errors.
pub fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<u64, ::error::Error> {
use basic_types::Seal::With;
use util::U256;
use util::triehash::ordered_trie_root;
let rlp = UntrustedRlp::new(chunk);
let item_count = rlp.item_count()?;
let num_blocks = (item_count - 3) as u64;
trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3);
if self.fed_blocks + num_blocks > SNAPSHOT_BLOCKS {
return Err(Error::TooManyBlocks(SNAPSHOT_BLOCKS, self.fed_blocks).into())
}
// todo: assert here that these values are consistent with chunks being in order.
let mut cur_number = rlp.val_at::<u64>(0)? + 1;
let mut parent_hash = rlp.val_at::<H256>(1)?;
let parent_total_difficulty = rlp.val_at::<U256>(2)?;
for idx in 3..item_count {
if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }
let pair = rlp.at(idx)?;
let abridged_rlp = pair.at(0)?.as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?;
let receipts_root = ordered_trie_root(
pair.at(1)?.iter().map(|r| r.as_raw().to_owned())
);
let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
let block_bytes = block.rlp_bytes(With);
let is_best = cur_number == self.best_number;
if is_best {
if block.header.hash() != self.best_hash {
return Err(Error::WrongBlockHash(cur_number, self.best_hash, block.header.hash()).into())
}
if block.header.state_root() != &self.best_root {
return Err(Error::WrongStateRoot(self.best_root, *block.header.state_root()).into())
}
}
verify_old_block(
&mut self.rng,
&block.header,
engine,
&self.chain,
Some(&block_bytes),
is_best
)?;
let mut batch = self.db.transaction();
// special-case the first block in each chunk.
if idx == 3 {
if self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, Some(parent_total_difficulty), is_best, false) {
self.disconnected.push((cur_number, block.header.hash()));
}
} else {
self.chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, is_best, false);
}
self.db.write_buffered(batch);
self.chain.commit();
parent_hash = BlockView::new(&block_bytes).hash();
cur_number += 1;
}
self.fed_blocks += num_blocks;
Ok(num_blocks)
}
/// Glue together any disconnected chunks and check that the chain is complete.
pub fn finalize(self, canonical: HashMap<u64, H256>) -> Result<(), Error> {
let mut batch = self.db.transaction();
for (first_num, first_hash) in self.disconnected {
let parent_num = first_num - 1;
// check if the parent is even in the chain.
// since we don't restore every single block in the chain,
// the first block of the first chunks has nothing to connect to.
if let Some(parent_hash) = self.chain.block_hash(parent_num) {
// if so, add the child to it.
self.chain.add_child(&mut batch, parent_hash, first_hash);
}
}
self.db.write_buffered(batch);
let best_number = self.best_number;
for num in (0..self.fed_blocks).map(|x| best_number - x) {
let hash = self.chain.block_hash(num).ok_or(Error::IncompleteChain)?;
if let Some(canon_hash) = canonical.get(&num).cloned() {
if canon_hash != hash {
return Err(Error::WrongBlockHash(num, canon_hash, hash));
}
}
}
Ok(()) Ok(())
} }
} }

View File

@ -16,14 +16,14 @@
//! Snapshot network service implementation. //! Snapshot network service implementation.
use std::collections::{HashMap, HashSet}; use std::collections::HashSet;
use std::io::ErrorKind; use std::io::ErrorKind;
use std::fs; use std::fs;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, SnapshotService}; use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService};
use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};
use blockchain::BlockChain; use blockchain::BlockChain;
@ -69,12 +69,11 @@ struct Restoration {
state_chunks_left: HashSet<H256>, state_chunks_left: HashSet<H256>,
block_chunks_left: HashSet<H256>, block_chunks_left: HashSet<H256>,
state: StateRebuilder, state: StateRebuilder,
blocks: BlockRebuilder, secondary: Box<Rebuilder>,
writer: Option<LooseWriter>, writer: Option<LooseWriter>,
snappy_buffer: Bytes, snappy_buffer: Bytes,
final_state_root: H256, final_state_root: H256,
guard: Guard, guard: Guard,
canonical_hashes: HashMap<u64, H256>,
db: Arc<Database>, db: Arc<Database>,
} }
@ -86,6 +85,7 @@ struct RestorationParams<'a> {
writer: Option<LooseWriter>, // writer for recovered snapshot. writer: Option<LooseWriter>, // writer for recovered snapshot.
genesis: &'a [u8], // genesis block of the chain. genesis: &'a [u8], // genesis block of the chain.
guard: Guard, // guard for the restoration directory. guard: Guard, // guard for the restoration directory.
engine: &'a Engine,
} }
impl Restoration { impl Restoration {
@ -100,7 +100,10 @@ impl Restoration {
.map_err(UtilError::SimpleString)?); .map_err(UtilError::SimpleString)?);
let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone());
let blocks = BlockRebuilder::new(chain, raw_db.clone(), &manifest)?; let components = params.engine.snapshot_components()
.ok_or_else(|| ::snapshot::Error::SnapshotsUnsupported)?;
let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?;
let root = manifest.state_root.clone(); let root = manifest.state_root.clone();
Ok(Restoration { Ok(Restoration {
@ -108,12 +111,11 @@ impl Restoration {
state_chunks_left: state_chunks, state_chunks_left: state_chunks,
block_chunks_left: block_chunks, block_chunks_left: block_chunks,
state: StateRebuilder::new(raw_db.clone(), params.pruning), state: StateRebuilder::new(raw_db.clone(), params.pruning),
blocks: blocks, secondary: secondary,
writer: params.writer, writer: params.writer,
snappy_buffer: Vec::new(), snappy_buffer: Vec::new(),
final_state_root: root, final_state_root: root,
guard: params.guard, guard: params.guard,
canonical_hashes: HashMap::new(),
db: raw_db, db: raw_db,
}) })
} }
@ -138,7 +140,7 @@ impl Restoration {
if self.block_chunks_left.remove(&hash) { if self.block_chunks_left.remove(&hash) {
let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?;
self.blocks.feed(&self.snappy_buffer[..len], engine, flag)?; self.secondary.feed(&self.snappy_buffer[..len], engine, flag)?;
if let Some(ref mut writer) = self.writer.as_mut() { if let Some(ref mut writer) = self.writer.as_mut() {
writer.write_block_chunk(hash, chunk)?; writer.write_block_chunk(hash, chunk)?;
} }
@ -147,13 +149,8 @@ impl Restoration {
Ok(()) Ok(())
} }
// note canonical hashes.
fn note_canonical(&mut self, hashes: &[(u64, H256)]) {
self.canonical_hashes.extend(hashes.iter().cloned());
}
// finish up restoration. // finish up restoration.
fn finalize(self) -> Result<(), Error> { fn finalize(mut self) -> Result<(), Error> {
use util::trie::TrieError; use util::trie::TrieError;
if !self.is_done() { return Ok(()) } if !self.is_done() { return Ok(()) }
@ -169,7 +166,7 @@ impl Restoration {
self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?; self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?;
// connect out-of-order chunks and verify chain integrity. // connect out-of-order chunks and verify chain integrity.
self.blocks.finalize(self.canonical_hashes)?; self.secondary.finalize()?;
if let Some(writer) = self.writer { if let Some(writer) = self.writer {
writer.finish(self.manifest)?; writer.finish(self.manifest)?;
@ -425,6 +422,7 @@ impl Service {
writer: writer, writer: writer,
genesis: &self.genesis_block, genesis: &self.genesis_block,
guard: Guard::new(rest_dir), guard: Guard::new(rest_dir),
engine: &*self.engine,
}; };
let state_chunks = params.manifest.state_hashes.len(); let state_chunks = params.manifest.state_hashes.len();
@ -593,14 +591,6 @@ impl SnapshotService for Service {
trace!("Error sending snapshot service message: {:?}", e); trace!("Error sending snapshot service message: {:?}", e);
} }
} }
fn provide_canon_hashes(&self, canonical: &[(u64, H256)]) {
let mut rest = self.restoration.lock();
if let Some(ref mut rest) = rest.as_mut() {
rest.note_canonical(canonical);
}
}
} }
impl Drop for Service { impl Drop for Service {

View File

@ -48,10 +48,6 @@ pub trait SnapshotService : Sync + Send {
/// Feed a raw block chunk to the service to be processed asynchronously. /// Feed a raw block chunk to the service to be processed asynchronously.
/// no-op if currently restoring. /// no-op if currently restoring.
fn restore_block_chunk(&self, hash: H256, chunk: Bytes); fn restore_block_chunk(&self, hash: H256, chunk: Bytes);
/// Give the restoration in-progress some canonical block hashes for
/// extra verification (performed at the end)
fn provide_canon_hashes(&self, canonical: &[(u64, H256)]);
} }
impl IpcConfig for SnapshotService { } impl IpcConfig for SnapshotService { }

View File

@ -21,33 +21,32 @@ use error::Error;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
use blockchain::BlockChain; use blockchain::BlockChain;
use snapshot::{chunk_blocks, BlockRebuilder, Error as SnapshotError, Progress}; use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents};
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
use util::{Mutex, snappy}; use util::{Mutex, snappy};
use util::kvdb::{Database, DatabaseConfig}; use util::kvdb::{self, KeyValueDB, DBTransaction};
use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot(30000);
fn chunk_and_restore(amount: u64) { fn chunk_and_restore(amount: u64) {
let mut canon_chain = ChainGenerator::default(); let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default(); let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap(); let genesis = canon_chain.generate(&mut finalizer).unwrap();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let engine = Arc::new(::engines::NullEngine::default()); let engine = Arc::new(::engines::NullEngine::default());
let orig_path = RandomTempPath::create_dir();
let new_path = RandomTempPath::create_dir(); let new_path = RandomTempPath::create_dir();
let mut snapshot_path = new_path.as_path().to_owned(); let mut snapshot_path = new_path.as_path().to_owned();
snapshot_path.push("SNAP"); snapshot_path.push("SNAP");
let old_db = Arc::new(Database::open(&db_cfg, orig_path.as_str()).unwrap()); let old_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone()); let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
// build the blockchain. // build the blockchain.
let mut batch = old_db.transaction(); let mut batch = DBTransaction::new();
for _ in 0..amount { for _ in 0..amount {
let block = canon_chain.generate(&mut finalizer).unwrap(); let block = canon_chain.generate(&mut finalizer).unwrap();
bc.insert_block(&mut batch, &block, vec![]); bc.insert_block(&mut batch, &block, vec![]);
@ -56,12 +55,18 @@ fn chunk_and_restore(amount: u64) {
old_db.write(batch).unwrap(); old_db.write(batch).unwrap();
let best_hash = bc.best_block_hash(); let best_hash = bc.best_block_hash();
// snapshot it. // snapshot it.
let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap()); let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
let block_hashes = chunk_blocks(&bc, best_hash, &writer, &Progress::default()).unwrap(); let block_hashes = chunk_secondary(
Box::new(SNAPSHOT_MODE),
&bc,
best_hash,
&writer,
&Progress::default()
).unwrap();
let manifest = ::snapshot::ManifestData { let manifest = ::snapshot::ManifestData {
version: 2, version: 2,
state_hashes: Vec::new(), state_hashes: Vec::new(),
@ -74,9 +79,10 @@ fn chunk_and_restore(amount: u64) {
writer.into_inner().finish(manifest.clone()).unwrap(); writer.into_inner().finish(manifest.clone()).unwrap();
// restore it. // restore it.
let new_db = Arc::new(Database::open(&db_cfg, new_path.as_str()).unwrap()); let new_db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone()); let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap(); let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
let flag = AtomicBool::new(true); let flag = AtomicBool::new(true);
for chunk_hash in &reader.manifest().block_hashes { for chunk_hash in &reader.manifest().block_hashes {
@ -85,7 +91,8 @@ fn chunk_and_restore(amount: u64) {
rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap(); rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap();
} }
rebuilder.finalize(HashMap::new()).unwrap(); rebuilder.finalize().unwrap();
drop(rebuilder);
// and test it. // and test it.
let new_chain = BlockChain::new(Default::default(), &genesis, new_db); let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
@ -118,10 +125,8 @@ fn checks_flag() {
}; };
let chunk = stream.out(); let chunk = stream.out();
let path = RandomTempPath::create_dir();
let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); let db = Arc::new(kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap());
let engine = Arc::new(::engines::NullEngine::default()); let engine = Arc::new(::engines::NullEngine::default());
let chain = BlockChain::new(Default::default(), &genesis, db.clone()); let chain = BlockChain::new(Default::default(), &genesis, db.clone());
@ -134,7 +139,7 @@ fn checks_flag() {
block_hash: H256::default(), block_hash: H256::default(),
}; };
let mut rebuilder = BlockRebuilder::new(chain, db.clone(), &manifest).unwrap(); let mut rebuilder = SNAPSHOT_MODE.rebuilder(chain, db.clone(), &manifest).unwrap();
match rebuilder.feed(&chunk, engine.as_ref(), &AtomicBool::new(false)) { match rebuilder.feed(&chunk, engine.as_ref(), &AtomicBool::new(false)) {
Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {} Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}

View File

@ -16,25 +16,27 @@
//! Parameters for a block chain. //! Parameters for a block chain.
use util::*;
use builtin::Builtin;
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint};
use factory::Factories;
use executive::Executive;
use trace::{NoopTracer, NoopVMTracer};
use action_params::{ActionValue, ActionParams};
use types::executed::CallType;
use state::{Backend, State, Substate};
use env_info::EnvInfo;
use pod_state::*;
use account_db::*;
use header::{BlockNumber, Header};
use state_db::StateDB;
use super::genesis::Genesis; use super::genesis::Genesis;
use super::seal::Generic as GenericSeal; use super::seal::Generic as GenericSeal;
use action_params::{ActionValue, ActionParams};
use builtin::Builtin;
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint};
use env_info::EnvInfo;
use error::Error;
use ethereum; use ethereum;
use ethjson; use ethjson;
use executive::Executive;
use factory::Factories;
use header::{BlockNumber, Header};
use pod_state::*;
use rlp::{Rlp, RlpStream}; use rlp::{Rlp, RlpStream};
use state_db::StateDB;
use state::{Backend, State, Substate};
use state::backend::Basic as BasicBackend;
use trace::{NoopTracer, NoopVMTracer};
use types::executed::CallType;
use util::*;
/// Parameters common to all engines. /// Parameters common to all engines.
#[derive(Debug, PartialEq, Clone, Default)] #[derive(Debug, PartialEq, Clone, Default)]
@ -57,6 +59,8 @@ pub struct CommonParams {
pub eip98_transition: BlockNumber, pub eip98_transition: BlockNumber,
/// Validate block receipts root. /// Validate block receipts root.
pub validate_receipts_transition: u64, pub validate_receipts_transition: u64,
/// Number of first block where EIP-86 (Metropolis) rules begin.
pub eip86_transition: BlockNumber,
} }
impl From<ethjson::spec::Params> for CommonParams { impl From<ethjson::spec::Params> for CommonParams {
@ -71,6 +75,7 @@ impl From<ethjson::spec::Params> for CommonParams {
fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None }, fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None },
eip98_transition: p.eip98_transition.map_or(0, Into::into), eip98_transition: p.eip98_transition.map_or(0, Into::into),
validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into), validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into),
eip86_transition: p.eip86_transition.map_or(BlockNumber::max_value(), Into::into),
} }
} }
} }
@ -116,19 +121,19 @@ pub struct Spec {
constructors: Vec<(Address, Bytes)>, constructors: Vec<(Address, Bytes)>,
/// May be prepopulated if we know this in advance. /// May be prepopulated if we know this in advance.
state_root_memo: RwLock<Option<H256>>, state_root_memo: RwLock<H256>,
/// Genesis state as plain old data. /// Genesis state as plain old data.
genesis_state: PodState, genesis_state: PodState,
} }
impl From<ethjson::spec::Spec> for Spec { fn load_from(s: ethjson::spec::Spec) -> Result<Spec, Error> {
fn from(s: ethjson::spec::Spec) -> Self {
let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect(); let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
let g = Genesis::from(s.genesis); let g = Genesis::from(s.genesis);
let GenericSeal(seal_rlp) = g.seal.into(); let GenericSeal(seal_rlp) = g.seal.into();
let params = CommonParams::from(s.params); let params = CommonParams::from(s.params);
Spec {
let mut s = Spec {
name: s.name.clone().into(), name: s.name.clone().into(),
params: params.clone(), params: params.clone(),
engine: Spec::engine(s.engine, params, builtins), engine: Spec::engine(s.engine, params, builtins),
@ -145,10 +150,17 @@ impl From<ethjson::spec::Spec> for Spec {
extra_data: g.extra_data, extra_data: g.extra_data,
seal_rlp: seal_rlp, seal_rlp: seal_rlp,
constructors: s.accounts.constructors().into_iter().map(|(a, c)| (a.into(), c.into())).collect(), constructors: s.accounts.constructors().into_iter().map(|(a, c)| (a.into(), c.into())).collect(),
state_root_memo: RwLock::new(g.state_root), state_root_memo: RwLock::new(Default::default()), // will be overwritten right after.
genesis_state: From::from(s.accounts), genesis_state: s.accounts.into(),
} };
// use memoized state root if provided.
match g.state_root {
Some(root) => *s.state_root_memo.get_mut() = root,
None => { let _ = s.run_constructors(&Default::default(), BasicBackend(MemoryDB::new()))?; },
} }
Ok(s)
} }
macro_rules! load_bundled { macro_rules! load_bundled {
@ -171,13 +183,93 @@ impl Spec {
} }
} }
// given a pre-constructor state, run all the given constructors and produce a new state and state root.
fn run_constructors<T: Backend>(&self, factories: &Factories, mut db: T) -> Result<T, Error> {
let mut root = SHA3_NULL_RLP;
// basic accounts in spec.
{
let mut t = factories.trie.create(db.as_hashdb_mut(), &mut root);
for (address, account) in self.genesis_state.get().iter() {
t.insert(&**address, &account.rlp())?;
}
}
for (address, account) in self.genesis_state.get().iter() {
db.note_non_null_account(address);
account.insert_additional(
&mut *factories.accountdb.create(db.as_hashdb_mut(), address.sha3()),
&factories.trie
);
}
let start_nonce = self.engine.account_start_nonce();
let (root, db) = {
let mut state = State::from_existing(
db,
root,
start_nonce,
factories.clone(),
)?;
// Execute contract constructors.
let env_info = EnvInfo {
number: 0,
author: self.author,
timestamp: self.timestamp,
difficulty: self.difficulty,
last_hashes: Default::default(),
gas_used: U256::zero(),
gas_limit: U256::max_value(),
};
let from = Address::default();
for &(ref address, ref constructor) in self.constructors.iter() {
trace!(target: "spec", "run_constructors: Creating a contract at {}.", address);
trace!(target: "spec", " .. root before = {}", state.root());
let params = ActionParams {
code_address: address.clone(),
code_hash: constructor.sha3(),
address: address.clone(),
sender: from.clone(),
origin: from.clone(),
gas: U256::max_value(),
gas_price: Default::default(),
value: ActionValue::Transfer(Default::default()),
code: Some(Arc::new(constructor.clone())),
data: None,
call_type: CallType::None,
};
let mut substate = Substate::new();
state.kill_account(&address);
{
let mut exec = Executive::new(&mut state, &env_info, self.engine.as_ref(), &factories.vm);
if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) {
warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e);
}
}
if let Err(e) = state.commit() {
warn!(target: "spec", "Genesis constructor trie commit at {} failed: {}.", address, e);
}
trace!(target: "spec", " .. root after = {}", state.root());
}
state.drop()
};
*self.state_root_memo.write() = root;
Ok(db)
}
/// Return the state root for the genesis state, memoising accordingly. /// Return the state root for the genesis state, memoising accordingly.
pub fn state_root(&self) -> H256 { pub fn state_root(&self) -> H256 {
if self.state_root_memo.read().is_none() { self.state_root_memo.read().clone()
*self.state_root_memo.write() = Some(self.genesis_state.root());
}
self.state_root_memo.read().as_ref().cloned()
.expect("state root memo ensured to be set at this point; qed")
} }
/// Get the known knodes of the network in enode format. /// Get the known knodes of the network in enode format.
@ -240,94 +332,46 @@ impl Spec {
self.timestamp = g.timestamp; self.timestamp = g.timestamp;
self.extra_data = g.extra_data; self.extra_data = g.extra_data;
self.seal_rlp = seal_rlp; self.seal_rlp = seal_rlp;
self.state_root_memo = RwLock::new(g.state_root);
} }
/// Alter the value of the genesis state. /// Alter the value of the genesis state.
pub fn set_genesis_state(&mut self, s: PodState) { pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> {
self.genesis_state = s; self.genesis_state = s;
*self.state_root_memo.write() = None; let _ = self.run_constructors(&Default::default(), BasicBackend(MemoryDB::new()))?;
Ok(())
} }
/// Returns `false` if the memoized state root is invalid. `true` otherwise. /// Returns `false` if the memoized state root is invalid. `true` otherwise.
pub fn is_state_root_valid(&self) -> bool { pub fn is_state_root_valid(&self) -> bool {
self.state_root_memo.read().clone().map_or(true, |sr| sr == self.genesis_state.root()) // TODO: get rid of this function and ensure state root always is valid.
// we're mostly there, but `self.genesis_state.root()` doesn't encompass
// post-constructor state.
*self.state_root_memo.read() == self.genesis_state.root()
} }
/// Ensure that the given state DB has the trie nodes in for the genesis state. /// Ensure that the given state DB has the trie nodes in for the genesis state.
pub fn ensure_db_good(&self, mut db: StateDB, factories: &Factories) -> Result<StateDB, Box<TrieError>> { pub fn ensure_db_good(&self, db: StateDB, factories: &Factories) -> Result<StateDB, Error> {
if db.as_hashdb().contains(&self.state_root()) { if db.as_hashdb().contains(&self.state_root()) {
return Ok(db) return Ok(db)
} }
trace!(target: "spec", "ensure_db_good: Fresh database? Cannot find state root {}", self.state_root());
let mut root = H256::new();
{ // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever
let mut t = factories.trie.create(db.as_hashdb_mut(), &mut root); // called anyway.
for (address, account) in self.genesis_state.get().iter() { let db = self.run_constructors(factories, db)?;
t.insert(&**address, &account.rlp())?;
}
}
trace!(target: "spec", "ensure_db_good: Populated sec trie; root is {}", root);
for (address, account) in self.genesis_state.get().iter() {
db.note_non_null_account(address);
account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address), &factories.trie);
}
// Execute contract constructors.
let env_info = EnvInfo {
number: 0,
author: self.author,
timestamp: self.timestamp,
difficulty: self.difficulty,
last_hashes: Default::default(),
gas_used: U256::zero(),
gas_limit: U256::max_value(),
};
let from = Address::default();
let start_nonce = self.engine.account_start_nonce();
let mut state = State::from_existing(db, root, start_nonce, factories.clone())?;
// Mutate the state with each constructor.
for &(ref address, ref constructor) in self.constructors.iter() {
trace!(target: "spec", "ensure_db_good: Creating a contract at {}.", address);
let params = ActionParams {
code_address: address.clone(),
code_hash: constructor.sha3(),
address: address.clone(),
sender: from.clone(),
origin: from.clone(),
gas: U256::max_value(),
gas_price: Default::default(),
value: ActionValue::Transfer(Default::default()),
code: Some(Arc::new(constructor.clone())),
data: None,
call_type: CallType::None,
};
let mut substate = Substate::new();
{
let mut exec = Executive::new(&mut state, &env_info, self.engine.as_ref(), &factories.vm);
if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) {
warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e);
}
}
if let Err(e) = state.commit() {
warn!(target: "spec", "Genesis constructor trie commit at {} failed: {}.", address, e);
}
}
let (root, db) = state.drop();
*self.state_root_memo.write() = Some(root);
Ok(db) Ok(db)
} }
/// Loads spec from json file. /// Loads spec from json file. Provide factories for executing contracts and ensuring
/// storage goes to the right place.
pub fn load<R>(reader: R) -> Result<Self, String> where R: Read { pub fn load<R>(reader: R) -> Result<Self, String> where R: Read {
match ethjson::spec::Spec::load(reader) { fn fmt<F: ::std::fmt::Display>(f: F) -> String {
Ok(spec) => Ok(spec.into()), format!("Spec json is invalid: {}", f)
Err(e) => Err(format!("Spec json is invalid: {}", e)),
} }
ethjson::spec::Spec::load(reader).map_err(fmt)
.and_then(|x| load_from(x).map_err(fmt))
} }
/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus. /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus.
@ -391,9 +435,9 @@ mod tests {
#[test] #[test]
fn genesis_constructor() { fn genesis_constructor() {
::ethcore_logger::init_log();
let spec = Spec::new_test_constructor(); let spec = Spec::new_test_constructor();
let mut db_result = get_temp_state_db(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let db = spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let state = State::from_existing(db.boxed_clone(), spec.state_root(), spec.engine.account_start_nonce(), Default::default()).unwrap(); let state = State::from_existing(db.boxed_clone(), spec.state_root(), spec.engine.account_start_nonce(), Default::default()).unwrap();
let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
assert_eq!(state.storage_at(&Address::from_str("0000000000000000000000000000000000000005").unwrap(), &H256::zero()).unwrap(), expected); assert_eq!(state.storage_at(&Address::from_str("0000000000000000000000000000000000000005").unwrap(), &H256::zero()).unwrap(), expected);

View File

@ -206,7 +206,8 @@ impl<H: AsHashDB> Proving<H> {
} }
} }
/// Consume the backend, extracting the gathered proof. /// Consume the backend, extracting the gathered proof in lexicographical order
/// by value.
pub fn extract_proof(self) -> Vec<DBValue> { pub fn extract_proof(self) -> Vec<DBValue> {
self.proof.into_inner().into_iter().collect() self.proof.into_inner().into_iter().collect()
} }
@ -221,3 +222,33 @@ impl<H: AsHashDB + Clone> Clone for Proving<H> {
} }
} }
} }
/// A basic backend. Just wraps the given database, directly inserting into and deleting from
/// it. Doesn't cache anything.
pub struct Basic<H>(pub H);
impl<H: AsHashDB + Send + Sync> Backend for Basic<H> {
fn as_hashdb(&self) -> &HashDB {
self.0.as_hashdb()
}
fn as_hashdb_mut(&mut self) -> &mut HashDB {
self.0.as_hashdb_mut()
}
fn add_to_account_cache(&mut self, _: Address, _: Option<Account>, _: bool) { }
fn cache_code(&self, _: H256, _: Arc<Vec<u8>>) { }
fn get_cached_account(&self, _: &Address) -> Option<Option<Account>> { None }
fn get_cached<F, U>(&self, _: &Address, _: F) -> Option<U>
where F: FnOnce(Option<&mut Account>) -> U
{
None
}
fn get_cached_code(&self, _: &H256) -> Option<Arc<Vec<u8>>> { None }
fn note_non_null_account(&self, _: &Address) { }
fn is_known_null(&self, _: &Address) -> bool { false }
}

View File

@ -238,7 +238,7 @@ pub fn check_proof(
/// Reverting a checkpoint with `revert_to_checkpoint` involves copying /// Reverting a checkpoint with `revert_to_checkpoint` involves copying
/// original values from the latest checkpoint back into `cache`. The code /// original values from the latest checkpoint back into `cache`. The code
/// takes care not to overwrite cached storage while doing that. /// takes care not to overwrite cached storage while doing that.
/// checkpoint can be discateded with `discard_checkpoint`. All of the orignal /// checkpoint can be discarded with `discard_checkpoint`. All of the orignal
/// backed-up values are moved into a parent checkpoint (if any). /// backed-up values are moved into a parent checkpoint (if any).
/// ///
pub struct State<B: Backend> { pub struct State<B: Backend> {
@ -433,6 +433,11 @@ impl<B: Backend> State<B> {
self.ensure_cached(a, RequireCache::None, false, |a| a.map_or(false, |a| !a.is_null())) self.ensure_cached(a, RequireCache::None, false, |a| a.map_or(false, |a| !a.is_null()))
} }
/// Determine whether an account exists and has code.
pub fn exists_and_has_code(&self, a: &Address) -> trie::Result<bool> {
self.ensure_cached(a, RequireCache::CodeSize, false, |a| a.map_or(false, |a| a.code_size().map_or(false, |size| size != 0)))
}
/// Get the balance of account `a`. /// Get the balance of account `a`.
pub fn balance(&self, a: &Address) -> trie::Result<U256> { pub fn balance(&self, a: &Address) -> trie::Result<U256> {
self.ensure_cached(a, RequireCache::None, true, self.ensure_cached(a, RequireCache::None, true,
@ -939,7 +944,6 @@ mod tests {
use ethkey::Secret; use ethkey::Secret;
use util::{U256, H256, Address, Hashable}; use util::{U256, H256, Address, Hashable};
use tests::helpers::*; use tests::helpers::*;
use devtools::*;
use env_info::EnvInfo; use env_info::EnvInfo;
use spec::*; use spec::*;
use transaction::*; use transaction::*;
@ -955,8 +959,7 @@ mod tests {
fn should_apply_create_transaction() { fn should_apply_create_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -998,9 +1001,8 @@ mod tests {
let a = Address::zero(); let a = Address::zero();
let temp = RandomTempPath::new();
let mut state = { let mut state = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state();
assert_eq!(state.exists(&a).unwrap(), false); assert_eq!(state.exists(&a).unwrap(), false);
state.inc_nonce(&a).unwrap(); state.inc_nonce(&a).unwrap();
state.commit().unwrap(); state.commit().unwrap();
@ -1015,8 +1017,7 @@ mod tests {
fn should_trace_failed_create_transaction() { fn should_trace_failed_create_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1052,8 +1053,7 @@ mod tests {
fn should_trace_call_transaction() { fn should_trace_call_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1095,8 +1095,7 @@ mod tests {
fn should_trace_basic_call_transaction() { fn should_trace_basic_call_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1137,8 +1136,7 @@ mod tests {
fn should_trace_call_transaction_to_builtin() { fn should_trace_call_transaction_to_builtin() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1179,8 +1177,7 @@ mod tests {
fn should_not_trace_subcall_transaction_to_builtin() { fn should_not_trace_subcall_transaction_to_builtin() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1222,8 +1219,7 @@ mod tests {
fn should_not_trace_callcode() { fn should_not_trace_callcode() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1281,15 +1277,14 @@ mod tests {
fn should_not_trace_delegatecall() { fn should_not_trace_delegatecall() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
info.number = 0x789b0; info.number = 0x789b0;
let engine = &*Spec::new_test().engine; let engine = &*Spec::new_test().engine;
println!("schedule.have_delegate_call: {:?}", engine.schedule(&info).have_delegate_call); println!("schedule.have_delegate_call: {:?}", engine.schedule(info.number).have_delegate_call);
let t = Transaction { let t = Transaction {
nonce: 0.into(), nonce: 0.into(),
@ -1343,8 +1338,7 @@ mod tests {
fn should_trace_failed_call_transaction() { fn should_trace_failed_call_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1383,8 +1377,7 @@ mod tests {
fn should_trace_call_with_subcall_transaction() { fn should_trace_call_with_subcall_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1443,8 +1436,7 @@ mod tests {
fn should_trace_call_with_basic_subcall_transaction() { fn should_trace_call_with_basic_subcall_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1498,8 +1490,7 @@ mod tests {
fn should_not_trace_call_with_invalid_basic_subcall_transaction() { fn should_not_trace_call_with_invalid_basic_subcall_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1541,8 +1532,7 @@ mod tests {
fn should_trace_failed_subcall_transaction() { fn should_trace_failed_subcall_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1597,8 +1587,7 @@ mod tests {
fn should_trace_call_with_subcall_with_subcall_transaction() { fn should_trace_call_with_subcall_with_subcall_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1672,8 +1661,7 @@ mod tests {
fn should_trace_failed_subcall_with_subcall_transaction() { fn should_trace_failed_subcall_with_subcall_transaction() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1745,8 +1733,7 @@ mod tests {
fn should_trace_suicide() { fn should_trace_suicide() {
init_log(); init_log();
let temp = RandomTempPath::new(); let mut state = get_temp_state();
let mut state = get_temp_state_in(temp.as_path());
let mut info = EnvInfo::default(); let mut info = EnvInfo::default();
info.gas_limit = 1_000_000.into(); info.gas_limit = 1_000_000.into();
@ -1797,9 +1784,8 @@ mod tests {
#[test] #[test]
fn code_from_database() { fn code_from_database() {
let a = Address::zero(); let a = Address::zero();
let temp = RandomTempPath::new();
let (root, db) = { let (root, db) = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state();
state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}).unwrap(); state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}).unwrap();
state.init_code(&a, vec![1, 2, 3]).unwrap(); state.init_code(&a, vec![1, 2, 3]).unwrap();
assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec()))); assert_eq!(state.code(&a).unwrap(), Some(Arc::new([1u8, 2, 3].to_vec())));
@ -1815,9 +1801,8 @@ mod tests {
#[test] #[test]
fn storage_at_from_database() { fn storage_at_from_database() {
let a = Address::zero(); let a = Address::zero();
let temp = RandomTempPath::new();
let (root, db) = { let (root, db) = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state();
state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))).unwrap(); state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(69u64))).unwrap();
state.commit().unwrap(); state.commit().unwrap();
state.drop() state.drop()
@ -1830,9 +1815,8 @@ mod tests {
#[test] #[test]
fn get_from_database() { fn get_from_database() {
let a = Address::zero(); let a = Address::zero();
let temp = RandomTempPath::new();
let (root, db) = { let (root, db) = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state();
state.inc_nonce(&a).unwrap(); state.inc_nonce(&a).unwrap();
state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap();
state.commit().unwrap(); state.commit().unwrap();
@ -1848,8 +1832,7 @@ mod tests {
#[test] #[test]
fn remove() { fn remove() {
let a = Address::zero(); let a = Address::zero();
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
assert_eq!(state.exists(&a).unwrap(), false); assert_eq!(state.exists(&a).unwrap(), false);
assert_eq!(state.exists_and_not_null(&a).unwrap(), false); assert_eq!(state.exists_and_not_null(&a).unwrap(), false);
state.inc_nonce(&a).unwrap(); state.inc_nonce(&a).unwrap();
@ -1865,8 +1848,7 @@ mod tests {
#[test] #[test]
fn empty_account_is_not_created() { fn empty_account_is_not_created() {
let a = Address::zero(); let a = Address::zero();
let path = RandomTempPath::new(); let db = get_temp_state_db();
let db = get_temp_state_db_in(path.as_path());
let (root, db) = { let (root, db) = {
let mut state = State::new(db, U256::from(0), Default::default()); let mut state = State::new(db, U256::from(0), Default::default());
state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty).unwrap(); // create an empty account state.add_balance(&a, &U256::default(), CleanupMode::NoEmpty).unwrap(); // create an empty account
@ -1881,8 +1863,7 @@ mod tests {
#[test] #[test]
fn empty_account_exists_when_creation_forced() { fn empty_account_exists_when_creation_forced() {
let a = Address::zero(); let a = Address::zero();
let path = RandomTempPath::new(); let db = get_temp_state_db();
let db = get_temp_state_db_in(path.as_path());
let (root, db) = { let (root, db) = {
let mut state = State::new(db, U256::from(0), Default::default()); let mut state = State::new(db, U256::from(0), Default::default());
state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate).unwrap(); // create an empty account state.add_balance(&a, &U256::default(), CleanupMode::ForceCreate).unwrap(); // create an empty account
@ -1897,9 +1878,8 @@ mod tests {
#[test] #[test]
fn remove_from_database() { fn remove_from_database() {
let a = Address::zero(); let a = Address::zero();
let temp = RandomTempPath::new();
let (root, db) = { let (root, db) = {
let mut state = get_temp_state_in(temp.as_path()); let mut state = get_temp_state();
state.inc_nonce(&a).unwrap(); state.inc_nonce(&a).unwrap();
state.commit().unwrap(); state.commit().unwrap();
assert_eq!(state.exists(&a).unwrap(), true); assert_eq!(state.exists(&a).unwrap(), true);
@ -1925,8 +1905,7 @@ mod tests {
#[test] #[test]
fn alter_balance() { fn alter_balance() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
let a = Address::zero(); let a = Address::zero();
let b = 1u64.into(); let b = 1u64.into();
state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap();
@ -1947,8 +1926,7 @@ mod tests {
#[test] #[test]
fn alter_nonce() { fn alter_nonce() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
let a = Address::zero(); let a = Address::zero();
state.inc_nonce(&a).unwrap(); state.inc_nonce(&a).unwrap();
assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64)); assert_eq!(state.nonce(&a).unwrap(), U256::from(1u64));
@ -1964,8 +1942,7 @@ mod tests {
#[test] #[test]
fn balance_nonce() { fn balance_nonce() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
let a = Address::zero(); let a = Address::zero();
assert_eq!(state.balance(&a).unwrap(), U256::from(0u64)); assert_eq!(state.balance(&a).unwrap(), U256::from(0u64));
assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64)); assert_eq!(state.nonce(&a).unwrap(), U256::from(0u64));
@ -1976,8 +1953,7 @@ mod tests {
#[test] #[test]
fn ensure_cached() { fn ensure_cached() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
let a = Address::zero(); let a = Address::zero();
state.require(&a, false).unwrap(); state.require(&a, false).unwrap();
state.commit().unwrap(); state.commit().unwrap();
@ -1986,8 +1962,7 @@ mod tests {
#[test] #[test]
fn checkpoint_basic() { fn checkpoint_basic() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
let a = Address::zero(); let a = Address::zero();
state.checkpoint(); state.checkpoint();
state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap(); state.add_balance(&a, &U256::from(69u64), CleanupMode::NoEmpty).unwrap();
@ -2003,8 +1978,7 @@ mod tests {
#[test] #[test]
fn checkpoint_nested() { fn checkpoint_nested() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
let a = Address::zero(); let a = Address::zero();
state.checkpoint(); state.checkpoint();
state.checkpoint(); state.checkpoint();
@ -2018,16 +1992,14 @@ mod tests {
#[test] #[test]
fn create_empty() { fn create_empty() {
let mut state_result = get_temp_state(); let mut state = get_temp_state();
let mut state = state_result.reference_mut();
state.commit().unwrap(); state.commit().unwrap();
assert_eq!(state.root().hex(), "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"); assert_eq!(state.root().hex(), "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421");
} }
#[test] #[test]
fn should_not_panic_on_state_diff_with_storage() { fn should_not_panic_on_state_diff_with_storage() {
let state = get_temp_state(); let mut state = get_temp_state();
let mut state = state.reference().clone();
let a: Address = 0xa.into(); let a: Address = 0xa.into();
state.init_code(&a, b"abcdefg".to_vec()).unwrap();; state.init_code(&a, b"abcdefg".to_vec()).unwrap();;

View File

@ -448,7 +448,8 @@ impl state::Backend for StateDB {
fn is_known_null(&self, address: &Address) -> bool { fn is_known_null(&self, address: &Address) -> bool {
trace!(target: "account_bloom", "Check account bloom: {:?}", address); trace!(target: "account_bloom", "Check account bloom: {:?}", address);
let bloom = self.account_bloom.lock(); let bloom = self.account_bloom.lock();
!bloom.check(&*address.sha3()) let is_null = !bloom.check(&*address.sha3());
is_null
} }
} }
@ -463,8 +464,7 @@ mod tests {
fn state_db_smoke() { fn state_db_smoke() {
init_log(); init_log();
let mut state_db_result = get_temp_state_db(); let state_db = get_temp_state_db();
let state_db = state_db_result.take();
let root_parent = H256::random(); let root_parent = H256::random();
let address = Address::random(); let address = Address::random();
let h0 = H256::random(); let h0 = H256::random();

View File

@ -72,8 +72,7 @@ fn should_return_registrar() {
#[test] #[test]
fn returns_state_root_basic() { fn returns_state_root_basic() {
let client_result = generate_dummy_client(6); let client = generate_dummy_client(6);
let client = client_result.reference();
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let genesis_header = test_spec.genesis_header(); let genesis_header = test_spec.genesis_header();
@ -125,8 +124,7 @@ fn query_none_block() {
#[test] #[test]
fn query_bad_block() { fn query_bad_block() {
let client_result = get_test_client_with_blocks(vec![get_bad_state_dummy_block()]); let client = get_test_client_with_blocks(vec![get_bad_state_dummy_block()]);
let client = client_result.reference();
let bad_block: Option<_> = client.block_header(BlockId::Number(1)); let bad_block: Option<_> = client.block_header(BlockId::Number(1));
assert!(bad_block.is_none()); assert!(bad_block.is_none());
@ -135,8 +133,7 @@ fn query_bad_block() {
#[test] #[test]
fn returns_chain_info() { fn returns_chain_info() {
let dummy_block = get_good_dummy_block(); let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let block = BlockView::new(&dummy_block); let block = BlockView::new(&dummy_block);
let info = client.chain_info(); let info = client.chain_info();
assert_eq!(info.best_block_hash, block.header().hash()); assert_eq!(info.best_block_hash, block.header().hash());
@ -145,8 +142,7 @@ fn returns_chain_info() {
#[test] #[test]
fn returns_logs() { fn returns_logs() {
let dummy_block = get_good_dummy_block(); let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let logs = client.logs(Filter { let logs = client.logs(Filter {
from_block: BlockId::Earliest, from_block: BlockId::Earliest,
to_block: BlockId::Latest, to_block: BlockId::Latest,
@ -160,14 +156,13 @@ fn returns_logs() {
#[test] #[test]
fn returns_logs_with_limit() { fn returns_logs_with_limit() {
let dummy_block = get_good_dummy_block(); let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let logs = client.logs(Filter { let logs = client.logs(Filter {
from_block: BlockId::Earliest, from_block: BlockId::Earliest,
to_block: BlockId::Latest, to_block: BlockId::Latest,
address: None, address: None,
topics: vec![], topics: vec![],
limit: Some(2), limit: None,
}); });
assert_eq!(logs.len(), 0); assert_eq!(logs.len(), 0);
} }
@ -175,8 +170,7 @@ fn returns_logs_with_limit() {
#[test] #[test]
fn returns_block_body() { fn returns_block_body() {
let dummy_block = get_good_dummy_block(); let dummy_block = get_good_dummy_block();
let client_result = get_test_client_with_blocks(vec![dummy_block.clone()]); let client = get_test_client_with_blocks(vec![dummy_block.clone()]);
let client = client_result.reference();
let block = BlockView::new(&dummy_block); let block = BlockView::new(&dummy_block);
let body = client.block_body(BlockId::Hash(block.header().hash())).unwrap(); let body = client.block_body(BlockId::Hash(block.header().hash())).unwrap();
let body = body.rlp(); let body = body.rlp();
@ -187,8 +181,7 @@ fn returns_block_body() {
#[test] #[test]
fn imports_block_sequence() { fn imports_block_sequence() {
let client_result = generate_dummy_client(6); let client = generate_dummy_client(6);
let client = client_result.reference();
let block = client.block_header(BlockId::Number(5)).unwrap(); let block = client.block_header(BlockId::Number(5)).unwrap();
assert!(!block.into_inner().is_empty()); assert!(!block.into_inner().is_empty());
@ -196,8 +189,7 @@ fn imports_block_sequence() {
#[test] #[test]
fn can_collect_garbage() { fn can_collect_garbage() {
let client_result = generate_dummy_client(100); let client = generate_dummy_client(100);
let client = client_result.reference();
client.tick(); client.tick();
assert!(client.blockchain_cache_info().blocks < 100 * 1024); assert!(client.blockchain_cache_info().blocks < 100 * 1024);
} }
@ -205,19 +197,16 @@ fn can_collect_garbage() {
#[test] #[test]
fn can_generate_gas_price_median() { fn can_generate_gas_price_median() {
let client_result = generate_dummy_client_with_data(3, 1, slice_into![1, 2, 3]); let client = generate_dummy_client_with_data(3, 1, slice_into![1, 2, 3]);
let client = client_result.reference();
assert_eq!(Some(&U256::from(2)), client.gas_price_corpus(3).median()); assert_eq!(Some(&U256::from(2)), client.gas_price_corpus(3).median());
let client_result = generate_dummy_client_with_data(4, 1, slice_into![1, 4, 3, 2]); let client = generate_dummy_client_with_data(4, 1, slice_into![1, 4, 3, 2]);
let client = client_result.reference();
assert_eq!(Some(&U256::from(3)), client.gas_price_corpus(3).median()); assert_eq!(Some(&U256::from(3)), client.gas_price_corpus(3).median());
} }
#[test] #[test]
fn can_generate_gas_price_histogram() { fn can_generate_gas_price_histogram() {
let client_result = generate_dummy_client_with_data(20, 1, slice_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]); let client = generate_dummy_client_with_data(20, 1, slice_into![6354,8593,6065,4842,7845,7002,689,4958,4250,6098,5804,4320,643,8895,2296,8589,7145,2000,2512,1408]);
let client = client_result.reference();
let hist = client.gas_price_corpus(20).histogram(5).unwrap(); let hist = client.gas_price_corpus(20).histogram(5).unwrap();
let correct_hist = ::stats::Histogram { bucket_bounds: vec_into![643, 2294, 3945, 5596, 7247, 8898], counts: vec![4,2,4,6,4] }; let correct_hist = ::stats::Histogram { bucket_bounds: vec_into![643, 2294, 3945, 5596, 7247, 8898], counts: vec![4,2,4,6,4] };
@ -226,32 +215,29 @@ fn can_generate_gas_price_histogram() {
#[test] #[test]
fn empty_gas_price_histogram() { fn empty_gas_price_histogram() {
let client_result = generate_dummy_client_with_data(20, 0, slice_into![]); let client = generate_dummy_client_with_data(20, 0, slice_into![]);
let client = client_result.reference();
assert!(client.gas_price_corpus(20).histogram(5).is_none()); assert!(client.gas_price_corpus(20).histogram(5).is_none());
} }
#[test] #[test]
fn corpus_is_sorted() { fn corpus_is_sorted() {
let client_result = generate_dummy_client_with_data(2, 1, slice_into![U256::from_str("11426908979").unwrap(), U256::from_str("50426908979").unwrap()]); let client = generate_dummy_client_with_data(2, 1, slice_into![U256::from_str("11426908979").unwrap(), U256::from_str("50426908979").unwrap()]);
let client = client_result.reference();
let corpus = client.gas_price_corpus(20); let corpus = client.gas_price_corpus(20);
assert!(corpus[0] < corpus[1]); assert!(corpus[0] < corpus[1]);
} }
#[test] #[test]
fn can_handle_long_fork() { fn can_handle_long_fork() {
let client_result = generate_dummy_client(1200); let client = generate_dummy_client(1200);
let client = client_result.reference();
for _ in 0..20 { for _ in 0..20 {
client.import_verified_blocks(); client.import_verified_blocks();
} }
assert_eq!(1200, client.chain_info().best_block_number); assert_eq!(1200, client.chain_info().best_block_number);
push_blocks_to_client(client, 45, 1201, 800); push_blocks_to_client(&client, 45, 1201, 800);
push_blocks_to_client(client, 49, 1201, 800); push_blocks_to_client(&client, 49, 1201, 800);
push_blocks_to_client(client, 53, 1201, 600); push_blocks_to_client(&client, 53, 1201, 600);
for _ in 0..400 { for _ in 0..400 {
client.import_verified_blocks(); client.import_verified_blocks();
@ -262,8 +248,7 @@ fn can_handle_long_fork() {
#[test] #[test]
fn can_mine() { fn can_mine() {
let dummy_blocks = get_good_dummy_block_seq(2); let dummy_blocks = get_good_dummy_block_seq(2);
let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); let client = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]);
let client = client_result.reference();
let b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]).close(); let b = client.prepare_open_block(Address::default(), (3141562.into(), 31415620.into()), vec![]).close();
@ -329,14 +314,13 @@ fn does_not_propagate_delayed_transactions() {
value: 0.into(), value: 0.into(),
data: Vec::new(), data: Vec::new(),
}.sign(secret, None), None); }.sign(secret, None), None);
let client_result = generate_dummy_client(1); let client = generate_dummy_client(1);
let client = client_result.reference();
client.miner().import_own_transaction(&**client, tx0).unwrap(); client.miner().import_own_transaction(&*client, tx0).unwrap();
client.miner().import_own_transaction(&**client, tx1).unwrap(); client.miner().import_own_transaction(&*client, tx1).unwrap();
assert_eq!(0, client.ready_transactions().len()); assert_eq!(0, client.ready_transactions().len());
assert_eq!(2, client.miner().pending_transactions().len()); assert_eq!(2, client.miner().pending_transactions().len());
push_blocks_to_client(client, 53, 2, 2); push_blocks_to_client(&client, 53, 2, 2);
client.flush_queue(); client.flush_queue();
assert_eq!(2, client.ready_transactions().len()); assert_eq!(2, client.ready_transactions().len());
assert_eq!(2, client.miner().pending_transactions().len()); assert_eq!(2, client.miner().pending_transactions().len());
@ -346,8 +330,7 @@ fn does_not_propagate_delayed_transactions() {
fn transaction_proof() { fn transaction_proof() {
use ::client::ProvingBlockChainClient; use ::client::ProvingBlockChainClient;
let client_result = generate_dummy_client(0); let client = generate_dummy_client(0);
let client = client_result.reference();
let address = Address::random(); let address = Address::random();
let test_spec = Spec::new_test(); let test_spec = Spec::new_test();
for _ in 0..20 { for _ in 0..20 {
@ -367,7 +350,7 @@ fn transaction_proof() {
data: Vec::new(), data: Vec::new(),
}.fake_sign(address); }.fake_sign(address);
let proof = client.prove_transaction(transaction.clone(), BlockId::Latest).unwrap(); let proof = client.prove_transaction(transaction.clone(), BlockId::Latest).unwrap().1;
let backend = state::backend::ProofCheck::new(&proof); let backend = state::backend::ProofCheck::new(&proof);
let mut factories = ::factory::Factories::default(); let mut factories = ::factory::Factories::default();

View File

@ -27,10 +27,8 @@ use builtin::Builtin;
use state::*; use state::*;
use evm::Schedule; use evm::Schedule;
use engines::Engine; use engines::Engine;
use env_info::EnvInfo;
use ethereum; use ethereum;
use ethereum::ethash::EthashParams; use ethereum::ethash::EthashParams;
use devtools::*;
use miner::Miner; use miner::Miner;
use header::Header; use header::Header;
use transaction::{Action, Transaction, SignedTransaction}; use transaction::{Action, Transaction, SignedTransaction};
@ -73,7 +71,7 @@ impl Engine for TestEngine {
self.engine.builtins() self.engine.builtins()
} }
fn schedule(&self, _env_info: &EnvInfo) -> Schedule { fn schedule(&self, _block_number: u64) -> Schedule {
let mut schedule = Schedule::new_frontier(); let mut schedule = Schedule::new_frontier();
schedule.max_depth = self.max_depth; schedule.max_depth = self.max_depth;
schedule schedule
@ -133,28 +131,26 @@ pub fn create_test_block_with_data(header: &Header, transactions: &[SignedTransa
rlp.out() rlp.out()
} }
pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult<Arc<Client>> { pub fn generate_dummy_client(block_number: u32) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[]) generate_dummy_client_with_spec_and_data(Spec::new_test, block_number, 0, &[])
} }
pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> GuardedTempResult<Arc<Client>> { pub fn generate_dummy_client_with_data(block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> {
generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices) generate_dummy_client_with_spec_and_data(Spec::new_null, block_number, txs_per_block, tx_gas_prices)
} }
pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> GuardedTempResult<Arc<Client>> where F: Fn()->Spec { pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, None, block_number, txs_per_block, tx_gas_prices) generate_dummy_client_with_spec_accounts_and_data(get_test_spec, None, block_number, txs_per_block, tx_gas_prices)
} }
pub fn generate_dummy_client_with_spec_and_accounts<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>) -> GuardedTempResult<Arc<Client>> where F: Fn()->Spec { pub fn generate_dummy_client_with_spec_and_accounts<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>) -> Arc<Client> where F: Fn()->Spec {
generate_dummy_client_with_spec_accounts_and_data(get_test_spec, accounts, 0, 0, &[]) generate_dummy_client_with_spec_accounts_and_data(get_test_spec, accounts, 0, 0, &[])
} }
pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> GuardedTempResult<Arc<Client>> where F: Fn()->Spec { pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, accounts: Option<Arc<AccountProvider>>, block_number: u32, txs_per_block: usize, tx_gas_prices: &[U256]) -> Arc<Client> where F: Fn()->Spec {
let dir = RandomTempPath::new();
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); let client_db = new_db();
let client_db = Arc::new(Database::open(&db_config, dir.as_path().to_str().unwrap()).unwrap());
let client = Client::new( let client = Client::new(
ClientConfig::default(), ClientConfig::default(),
@ -165,8 +161,7 @@ pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, ac
).unwrap(); ).unwrap();
let test_engine = &*test_spec.engine; let test_engine = &*test_spec.engine;
let mut db_result = get_temp_state_db(); let mut db = test_spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
let mut db = test_spec.ensure_db_good(db_result.take(), &Default::default()).unwrap();
let genesis_header = test_spec.genesis_header(); let genesis_header = test_spec.genesis_header();
let mut rolling_timestamp = 40; let mut rolling_timestamp = 40;
@ -205,7 +200,7 @@ pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, ac
action: Action::Create, action: Action::Create,
data: vec![], data: vec![],
value: U256::zero(), value: U256::zero(),
}.sign(kp.secret(), None), None).unwrap(); }.sign(kp.secret(), Some(test_spec.network_id())), None).unwrap();
n += 1; n += 1;
} }
@ -220,11 +215,7 @@ pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, ac
} }
client.flush_queue(); client.flush_queue();
client.import_verified_blocks(); client.import_verified_blocks();
client
GuardedTempResult::<Arc<Client>> {
_temp: dir,
result: Some(client)
}
} }
pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) { pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting_number: usize, block_number: usize) {
@ -256,11 +247,9 @@ pub fn push_blocks_to_client(client: &Arc<Client>, timestamp_salt: u64, starting
} }
} }
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<Client>> { pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
let dir = RandomTempPath::new();
let test_spec = get_test_spec(); let test_spec = get_test_spec();
let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); let client_db = new_db();
let client_db = Arc::new(Database::open(&db_config, dir.as_path().to_str().unwrap()).unwrap());
let client = Client::new( let client = Client::new(
ClientConfig::default(), ClientConfig::default(),
@ -277,23 +266,15 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<
} }
client.flush_queue(); client.flush_queue();
client.import_verified_blocks(); client.import_verified_blocks();
client
GuardedTempResult::<Arc<Client>> {
_temp: dir,
result: Some(client)
}
} }
fn new_db(path: &str) -> Arc<Database> { fn new_db() -> Arc<KeyValueDB> {
Arc::new( Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)))
Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path)
.expect("Opening database for tests should always work.")
)
} }
pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockChain> { pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain {
let temp = RandomTempPath::new(); let db = new_db();
let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
let mut batch = db.transaction(); let mut batch = db.transaction();
@ -302,16 +283,11 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockCh
bc.commit(); bc.commit();
} }
db.write(batch).unwrap(); db.write(batch).unwrap();
bc
GuardedTempResult::<BlockChain> {
_temp: temp,
result: Some(bc)
}
} }
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempResult<BlockChain> { pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain {
let temp = RandomTempPath::new(); let db = new_db();
let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
@ -321,66 +297,26 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
bc.commit(); bc.commit();
} }
db.write(batch).unwrap(); db.write(batch).unwrap();
bc
GuardedTempResult::<BlockChain> {
_temp: temp,
result: Some(bc)
}
} }
pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> { pub fn generate_dummy_empty_blockchain() -> BlockChain {
let temp = RandomTempPath::new(); let db = new_db();
let db = new_db(temp.as_str());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
bc
GuardedTempResult::<BlockChain> {
_temp: temp,
result: Some(bc)
}
} }
pub fn get_temp_state_db() -> GuardedTempResult<StateDB> { pub fn get_temp_state() -> State<::state_db::StateDB> {
let temp = RandomTempPath::new(); let journal_db = get_temp_state_db();
let journal_db = get_temp_state_db_in(temp.as_path());
GuardedTempResult {
_temp: temp,
result: Some(journal_db)
}
}
pub fn get_temp_state() -> GuardedTempResult<State<::state_db::StateDB>> {
let temp = RandomTempPath::new();
let journal_db = get_temp_state_db_in(temp.as_path());
GuardedTempResult {
_temp: temp,
result: Some(State::new(journal_db, U256::from(0), Default::default())),
}
}
pub fn get_temp_mem_state() -> State<::state_db::StateDB> {
let journal_db = get_temp_mem_state_db();
State::new(journal_db, U256::from(0), Default::default()) State::new(journal_db, U256::from(0), Default::default())
} }
pub fn get_temp_state_db_in(path: &Path) -> StateDB { pub fn get_temp_state_db() -> StateDB {
let db = new_db(path.to_str().expect("Only valid utf8 paths for tests.")); let db = new_db();
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024)
}
pub fn get_temp_mem_state_db() -> StateDB {
let db = Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE); let journal_db = journaldb::new(db, journaldb::Algorithm::EarlyMerge, ::db::COL_STATE);
StateDB::new(journal_db, 5 * 1024 * 1024) StateDB::new(journal_db, 5 * 1024 * 1024)
} }
pub fn get_temp_state_in(path: &Path) -> State<::state_db::StateDB> {
let journal_db = get_temp_state_db_in(path);
State::new(journal_db, U256::from(0), Default::default())
}
pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> { pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
let test_spec = get_test_spec(); let test_spec = get_test_spec();
get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash()) get_good_dummy_block_fork_seq(1, count, &test_spec.genesis_header().hash())
@ -405,7 +341,6 @@ pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_h
rolling_timestamp = rolling_timestamp + 10; rolling_timestamp = rolling_timestamp + 10;
r.push(create_test_block(&block_header)); r.push(create_test_block(&block_header));
} }
r r
} }

View File

@ -19,13 +19,16 @@
use std::ops::Deref; use std::ops::Deref;
use rlp::*; use rlp::*;
use util::sha3::Hashable; use util::sha3::Hashable;
use util::{H256, Address, U256, Bytes, HeapSizeOf}; use util::{H256, Address, U256, Bytes, HeapSizeOf, Uint};
use ethkey::{Signature, Secret, Public, recover, public_to_address, Error as EthkeyError}; use ethkey::{Signature, Secret, Public, recover, public_to_address, Error as EthkeyError};
use error::*; use error::*;
use evm::Schedule; use evm::Schedule;
use header::BlockNumber; use header::BlockNumber;
use ethjson; use ethjson;
/// Fake address for unsigned transactions as defined by EIP-86.
pub const UNSIGNED_SENDER: Address = ::util::H160([0xff; 20]);
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "ipc", binary)] #[cfg_attr(feature = "ipc", binary)]
/// Transaction action type. /// Transaction action type.
@ -110,8 +113,8 @@ impl HeapSizeOf for Transaction {
impl From<ethjson::state::Transaction> for SignedTransaction { impl From<ethjson::state::Transaction> for SignedTransaction {
fn from(t: ethjson::state::Transaction) -> Self { fn from(t: ethjson::state::Transaction) -> Self {
let to: Option<ethjson::hash::Address> = t.to.into(); let to: Option<ethjson::hash::Address> = t.to.into();
let secret = Secret::from_slice(&t.secret.0).expect("Valid secret expected."); let secret = t.secret.map(|s| Secret::from_slice(&s.0).expect("Valid secret expected."));
Transaction { let tx = Transaction {
nonce: t.nonce.into(), nonce: t.nonce.into(),
gas_price: t.gas_price.into(), gas_price: t.gas_price.into(),
gas: t.gas_limit.into(), gas: t.gas_limit.into(),
@ -121,7 +124,11 @@ impl From<ethjson::state::Transaction> for SignedTransaction {
}, },
value: t.value.into(), value: t.value.into(),
data: t.data.into(), data: t.data.into(),
}.sign(&secret, None) };
match secret {
Some(s) => tx.sign(&s, None),
None => tx.null_sign(1),
}
} }
} }
@ -180,8 +187,8 @@ impl Transaction {
pub fn invalid_sign(self) -> UnverifiedTransaction { pub fn invalid_sign(self) -> UnverifiedTransaction {
UnverifiedTransaction { UnverifiedTransaction {
unsigned: self, unsigned: self,
r: U256::default(), r: U256::one(),
s: U256::default(), s: U256::one(),
v: 0, v: 0,
hash: 0.into(), hash: 0.into(),
}.compute_hash() }.compute_hash()
@ -192,13 +199,28 @@ impl Transaction {
SignedTransaction { SignedTransaction {
transaction: UnverifiedTransaction { transaction: UnverifiedTransaction {
unsigned: self, unsigned: self,
r: U256::default(), r: U256::one(),
s: U256::default(), s: U256::one(),
v: 0, v: 0,
hash: 0.into(), hash: 0.into(),
}.compute_hash(), }.compute_hash(),
sender: from, sender: from,
public: Public::default(), public: None,
}
}
/// Add EIP-86 compatible empty signature.
pub fn null_sign(self, network_id: u64) -> SignedTransaction {
SignedTransaction {
transaction: UnverifiedTransaction {
unsigned: self,
r: U256::zero(),
s: U256::zero(),
v: network_id,
hash: 0.into(),
}.compute_hash(),
sender: UNSIGNED_SENDER,
public: None,
} }
} }
@ -276,6 +298,11 @@ impl UnverifiedTransaction {
self self
} }
/// Checks is signature is empty.
pub fn is_unsigned(&self) -> bool {
self.r.is_zero() && self.s.is_zero()
}
/// Append object with a signature into RLP stream /// Append object with a signature into RLP stream
fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) { fn rlp_append_sealed_transaction(&self, s: &mut RlpStream) {
s.begin_list(9); s.begin_list(9);
@ -307,6 +334,7 @@ impl UnverifiedTransaction {
/// The network ID, or `None` if this is a global transaction. /// The network ID, or `None` if this is a global transaction.
pub fn network_id(&self) -> Option<u64> { pub fn network_id(&self) -> Option<u64> {
match self.v { match self.v {
v if self.is_unsigned() => Some(v),
v if v > 36 => Some((v - 35) / 2), v if v > 36 => Some((v - 35) / 2),
_ => None, _ => None,
} }
@ -340,21 +368,33 @@ impl UnverifiedTransaction {
// TODO: consider use in block validation. // TODO: consider use in block validation.
#[cfg(test)] #[cfg(test)]
#[cfg(feature = "json-tests")] #[cfg(feature = "json-tests")]
pub fn validate(self, schedule: &Schedule, require_low: bool, allow_network_id_of_one: bool) -> Result<UnverifiedTransaction, Error> { pub fn validate(self, schedule: &Schedule, require_low: bool, allow_network_id_of_one: bool, allow_empty_signature: bool) -> Result<UnverifiedTransaction, Error> {
if require_low && !self.signature().is_low_s() { let chain_id = if allow_network_id_of_one { Some(1) } else { None };
return Err(EthkeyError::InvalidSignature.into()) self.verify_basic(require_low, chain_id, allow_empty_signature)?;
} if !allow_empty_signature || !self.is_unsigned() {
match self.network_id() {
None => {},
Some(1) if allow_network_id_of_one => {},
_ => return Err(TransactionError::InvalidNetworkId.into()),
}
self.recover_public()?; self.recover_public()?;
}
if self.gas < U256::from(self.gas_required(&schedule)) { if self.gas < U256::from(self.gas_required(&schedule)) {
Err(TransactionError::InvalidGasLimit(::util::OutOfBounds{min: Some(U256::from(self.gas_required(&schedule))), max: None, found: self.gas}).into()) return Err(TransactionError::InvalidGasLimit(::util::OutOfBounds{min: Some(U256::from(self.gas_required(&schedule))), max: None, found: self.gas}).into())
} else { }
Ok(self) Ok(self)
} }
/// Verify basic signature params. Does not attempt sender recovery.
pub fn verify_basic(&self, check_low_s: bool, chain_id: Option<u64>, allow_empty_signature: bool) -> Result<(), Error> {
if check_low_s && !(allow_empty_signature && self.is_unsigned()) {
self.check_low_s()?;
}
// EIP-86: Transactions of this form MUST have gasprice = 0, nonce = 0, value = 0, and do NOT increment the nonce of account 0.
if allow_empty_signature && self.is_unsigned() && !(self.gas_price.is_zero() && self.value.is_zero() && self.nonce.is_zero()) {
return Err(EthkeyError::InvalidSignature.into())
}
match (self.network_id(), chain_id) {
(None, _) => {},
(Some(n), Some(m)) if n == m => {},
_ => return Err(TransactionError::InvalidNetworkId.into()),
};
Ok(())
} }
} }
@ -363,7 +403,7 @@ impl UnverifiedTransaction {
pub struct SignedTransaction { pub struct SignedTransaction {
transaction: UnverifiedTransaction, transaction: UnverifiedTransaction,
sender: Address, sender: Address,
public: Public, public: Option<Public>,
} }
impl HeapSizeOf for SignedTransaction { impl HeapSizeOf for SignedTransaction {
@ -392,14 +432,22 @@ impl From<SignedTransaction> for UnverifiedTransaction {
impl SignedTransaction { impl SignedTransaction {
/// Try to verify transaction and recover sender. /// Try to verify transaction and recover sender.
pub fn new(transaction: UnverifiedTransaction) -> Result<Self, Error> { pub fn new(transaction: UnverifiedTransaction) -> Result<Self, Error> {
if transaction.is_unsigned() {
Ok(SignedTransaction {
transaction: transaction,
sender: UNSIGNED_SENDER,
public: None,
})
} else {
let public = transaction.recover_public()?; let public = transaction.recover_public()?;
let sender = public_to_address(&public); let sender = public_to_address(&public);
Ok(SignedTransaction { Ok(SignedTransaction {
transaction: transaction, transaction: transaction,
sender: sender, sender: sender,
public: public, public: Some(public),
}) })
} }
}
/// Returns transaction sender. /// Returns transaction sender.
pub fn sender(&self) -> Address { pub fn sender(&self) -> Address {
@ -407,9 +455,14 @@ impl SignedTransaction {
} }
/// Returns a public key of the sender. /// Returns a public key of the sender.
pub fn public_key(&self) -> Public { pub fn public_key(&self) -> Option<Public> {
self.public self.public
} }
/// Checks is signature is empty.
pub fn is_unsigned(&self) -> bool {
self.transaction.is_unsigned()
}
} }
/// Signed Transaction that is a part of canon blockchain. /// Signed Transaction that is a part of canon blockchain.
@ -435,6 +488,9 @@ impl LocalizedTransaction {
if let Some(sender) = self.cached_sender { if let Some(sender) = self.cached_sender {
return sender; return sender;
} }
if self.is_unsigned() {
return UNSIGNED_SENDER.clone();
}
let sender = public_to_address(&self.recover_public() let sender = public_to_address(&self.recover_public()
.expect("LocalizedTransaction is always constructed from transaction from blockchain; Blockchain only stores verified transactions; qed")); .expect("LocalizedTransaction is always constructed from transaction from blockchain; Blockchain only stores verified transactions; qed"));
self.cached_sender = Some(sender); self.cached_sender = Some(sender);

View File

@ -34,4 +34,8 @@ impl Verifier for CanonVerifier {
fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error> { fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error> {
verification::verify_block_final(expected, got) verification::verify_block_final(expected, got)
} }
fn verify_block_external(&self, header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> {
engine.verify_block_external(header, Some(bytes))
}
} }

View File

@ -34,4 +34,8 @@ impl Verifier for NoopVerifier {
fn verify_block_final(&self, _expected: &Header, _got: &Header) -> Result<(), Error> { fn verify_block_final(&self, _expected: &Header, _got: &Header) -> Result<(), Error> {
Ok(()) Ok(())
} }
fn verify_block_external(&self, _header: &Header, _bytes: &[u8], _engine: &Engine) -> Result<(), Error> {
Ok(())
}
} }

View File

@ -27,4 +27,6 @@ pub trait Verifier: Send + Sync {
fn verify_block_family(&self, header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error>; fn verify_block_family(&self, header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error>;
/// Do a final verification check for an enacted header vs its expected counterpart. /// Do a final verification check for an enacted header vs its expected counterpart.
fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>;
/// Verify a block, inspecing external state.
fn verify_block_external(&self, header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error>;
} }

View File

@ -34,6 +34,9 @@ pub const KEY_LENGTH: usize = 32;
pub const KEY_ITERATIONS: usize = 10240; pub const KEY_ITERATIONS: usize = 10240;
pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2;
/// Default MAC to use (in RPC).
pub const DEFAULT_MAC: [u8; 2] = [0, 0];
#[derive(PartialEq, Debug)] #[derive(PartialEq, Debug)]
pub enum ScryptError { pub enum ScryptError {
// log(N) < r / 16 // log(N) < r / 16

View File

@ -57,8 +57,8 @@ mod tests {
#[test] #[test]
fn test_brain() { fn test_brain() {
let words = "this is sparta!".to_owned(); let words = "this is sparta!".to_owned();
let first_keypair = Brain(words.clone()).generate().unwrap(); let first_keypair = Brain::new(words.clone()).generate().unwrap();
let second_keypair = Brain(words.clone()).generate().unwrap(); let second_keypair = Brain::new(words.clone()).generate().unwrap();
assert_eq!(first_keypair.secret(), second_keypair.secret()); assert_eq!(first_keypair.secret(), second_keypair.secret());
} }
} }

View File

@ -25,6 +25,7 @@ use rustc_serialize::hex::{ToHex, FromHex};
use bigint::hash::{H520, H256}; use bigint::hash::{H520, H256};
use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address}; use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address};
/// Signature encoded as RSV components
#[repr(C)] #[repr(C)]
pub struct Signature([u8; 65]); pub struct Signature([u8; 65]);
@ -44,8 +45,32 @@ impl Signature {
self.0[64] self.0[64]
} }
/// Encode the signature into VRS array (V altered to be in "Electrum" notation).
pub fn into_vrs(self) -> [u8; 65] {
let mut vrs = [0u8; 65];
vrs[0] = self.v() + 27;
vrs[1..33].copy_from_slice(self.r());
vrs[33..65].copy_from_slice(self.s());
vrs
}
/// Parse bytes as a signature encoded as VRS (V in "Electrum" notation).
/// May return empty (invalid) signature if given data has invalid length.
pub fn from_vrs(data: &[u8]) -> Self {
if data.len() != 65 || data[0] < 27 {
// fallback to empty (invalid) signature
return Signature::default();
}
let mut sig = [0u8; 65];
sig[0..32].copy_from_slice(&data[1..33]);
sig[32..64].copy_from_slice(&data[33..65]);
sig[64] = data[0] - 27;
Signature(sig)
}
/// Create a signature object from the sig. /// Create a signature object from the sig.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Signature { pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self {
let mut sig = [0u8; 65]; let mut sig = [0u8; 65];
sig[0..32].copy_from_slice(&r); sig[0..32].copy_from_slice(&r);
sig[32..64].copy_from_slice(&s); sig[32..64].copy_from_slice(&s);
@ -222,6 +247,21 @@ mod tests {
use {Generator, Random, Message}; use {Generator, Random, Message};
use super::{sign, verify_public, verify_address, recover, Signature}; use super::{sign, verify_public, verify_address, recover, Signature};
#[test]
fn vrs_conversion() {
// given
let keypair = Random.generate().unwrap();
let message = Message::default();
let signature = sign(keypair.secret(), &message).unwrap();
// when
let vrs = signature.clone().into_vrs();
let from_vrs = Signature::from_vrs(&vrs);
// then
assert_eq!(signature, from_vrs);
}
#[test] #[test]
fn signature_to_and_from_str() { fn signature_to_and_from_str() {
let keypair = Random.generate().unwrap(); let keypair = Random.generate().unwrap();

View File

@ -20,7 +20,7 @@ use std::sync::Arc;
use std::collections::HashMap; use std::collections::HashMap;
use util::{U256, H256, Address, Bytes, trie}; use util::{U256, H256, Address, Bytes, trie};
use ethcore::client::EnvInfo; use ethcore::client::EnvInfo;
use ethcore::evm::{self, Ext, ContractCreateResult, MessageCallResult, Schedule, CallType}; use ethcore::evm::{self, Ext, ContractCreateResult, MessageCallResult, Schedule, CallType, CreateContractAddress};
pub struct FakeExt { pub struct FakeExt {
schedule: Schedule, schedule: Schedule,
@ -31,7 +31,7 @@ pub struct FakeExt {
impl Default for FakeExt { impl Default for FakeExt {
fn default() -> Self { fn default() -> Self {
FakeExt { FakeExt {
schedule: Schedule::new_post_eip150(usize::max_value(), true, true, true), schedule: Schedule::new_post_eip150(usize::max_value(), true, true, true, true),
store: HashMap::new(), store: HashMap::new(),
depth: 1, depth: 1,
} }
@ -68,7 +68,7 @@ impl Ext for FakeExt {
unimplemented!(); unimplemented!();
} }
fn create(&mut self, _gas: &U256, _value: &U256, _code: &[u8]) -> ContractCreateResult { fn create(&mut self, _gas: &U256, _value: &U256, _code: &[u8], _address: CreateContractAddress) -> ContractCreateResult {
unimplemented!(); unimplemented!();
} }

View File

@ -211,7 +211,7 @@ mod tests {
} }
fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result { fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
assert_eq!(url, "https://ethcore.io/assets/images/ethcore-black-horizontal.png"); assert_eq!(url, "https://parity.io/assets/images/ethcore-black-horizontal.png");
future::ok(if self.return_success { future::ok(if self.return_success {
let cursor = ::std::io::Cursor::new(b"result"); let cursor = ::std::io::Cursor::new(b"result");
fetch::Response::from_reader(cursor) fetch::Response::from_reader(cursor)

View File

@ -370,7 +370,7 @@ pub mod tests {
// then // then
assert_eq!(res, Some(URLHintResult::Content(Content { assert_eq!(res, Some(URLHintResult::Content(Content {
url: "https://ethcore.io/assets/images/ethcore-black-horizontal.png".into(), url: "https://parity.io/assets/images/ethcore-black-horizontal.png".into(),
mime: mime!(Image/Png), mime: mime!(Image/Png),
owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(),
}))) })))
@ -395,11 +395,11 @@ pub mod tests {
#[test] #[test]
fn should_guess_mime_type_from_url() { fn should_guess_mime_type_from_url() {
let url1 = "https://ethcore.io/parity"; let url1 = "https://parity.io/parity";
let url2 = "https://ethcore.io/parity#content-type=image/png"; let url2 = "https://parity.io/parity#content-type=image/png";
let url3 = "https://ethcore.io/parity#something&content-type=image/png"; let url3 = "https://parity.io/parity#something&content-type=image/png";
let url4 = "https://ethcore.io/parity.png#content-type=image/jpeg"; let url4 = "https://parity.io/parity.png#content-type=image/jpeg";
let url5 = "https://ethcore.io/parity.png"; let url5 = "https://parity.io/parity.png";
assert_eq!(guess_mime_type(url1), None); assert_eq!(guess_mime_type(url1), None);

1
js/.gitignore vendored
View File

@ -8,4 +8,3 @@ docs
.happypack .happypack
.npmjs .npmjs
.eslintcache .eslintcache
yarn.lock

View File

@ -1,6 +1,6 @@
{ {
"name": "parity.js", "name": "parity.js",
"version": "1.7.53", "version": "1.7.70",
"main": "release/index.js", "main": "release/index.js",
"jsnext:main": "src/index.js", "jsnext:main": "src/index.js",
"author": "Parity Team <admin@parity.io>", "author": "Parity Team <admin@parity.io>",
@ -48,7 +48,7 @@
"ci:build:embed": "NODE_ENV=production EMBED=1 node webpack/embed", "ci:build:embed": "NODE_ENV=production EMBED=1 node webpack/embed",
"start": "npm install && npm run build:lib && npm run build:dll && npm run start:app", "start": "npm install && npm run build:lib && npm run build:dll && npm run start:app",
"start:app": "node webpack/dev.server", "start:app": "node webpack/dev.server",
"clean": "rm -rf ./.build ./.coverage ./.happypack ./.npmjs ./build ./node_modules/.cache", "clean": "rm -rf ./.build ./.coverage ./.happypack ./.npmjs ./build ./node_modules/.cache ./node_modules/@parity",
"coveralls": "npm run testCoverage && coveralls < coverage/lcov.info", "coveralls": "npm run testCoverage && coveralls < coverage/lcov.info",
"lint": "npm run lint:css && npm run lint:js", "lint": "npm run lint:css && npm run lint:js",
"lint:cached": "npm run lint:css && npm run lint:js:cached", "lint:cached": "npm run lint:css && npm run lint:js:cached",
@ -164,6 +164,7 @@
"blockies": "0.0.2", "blockies": "0.0.2",
"brace": "0.9.0", "brace": "0.9.0",
"bytes": "2.4.0", "bytes": "2.4.0",
"date-difference": "1.0.0",
"debounce": "1.0.0", "debounce": "1.0.0",
"es6-error": "4.0.0", "es6-error": "4.0.0",
"es6-promise": "4.0.5", "es6-promise": "4.0.5",
@ -228,6 +229,7 @@
"web3": "0.17.0-beta", "web3": "0.17.0-beta",
"whatwg-fetch": "2.0.1", "whatwg-fetch": "2.0.1",
"worker-loader": "^0.8.0", "worker-loader": "^0.8.0",
"yarn": "^0.21.3",
"zxcvbn": "4.4.1" "zxcvbn": "4.4.1"
} }
} }

View File

@ -22,6 +22,6 @@ export default (
<li>We collect your email address when you use this service. This is temporarily kept in memory, and then encrypted and stored in our EU servers. We only retain the cryptographic hash of the email address to prevent duplicated accounts. The cryptographic hash of your email address is also stored on the blockchain which is public by design. You consent to this use.</li> <li>We collect your email address when you use this service. This is temporarily kept in memory, and then encrypted and stored in our EU servers. We only retain the cryptographic hash of the email address to prevent duplicated accounts. The cryptographic hash of your email address is also stored on the blockchain which is public by design. You consent to this use.</li>
<li>You pay a fee for the cost of this service using the account you want to verify.</li> <li>You pay a fee for the cost of this service using the account you want to verify.</li>
<li>Your email address is transmitted to a third party EU email verification service mailjet for the sole purpose of the email verification. You consent to this use. Mailjet's privacy policy is here: <a href='https://www.mailjet.com/privacy-policy'>https://www.mailjet.com/privacy-policy</a>.</li> <li>Your email address is transmitted to a third party EU email verification service mailjet for the sole purpose of the email verification. You consent to this use. Mailjet's privacy policy is here: <a href='https://www.mailjet.com/privacy-policy'>https://www.mailjet.com/privacy-policy</a>.</li>
<li><i>Parity Technology Limited</i> is registered in England and Wales under company number <code>09760015</code> and complies with the Data Protection Act 1998 (UK). You may contact us via email at <a href={ 'mailto:admin@parity.io' }>admin@parity.io</a>. Our general privacy policy can be found here: <a href={ 'https://ethcore.io/legal.html' }>https://ethcore.io/legal.html</a>.</li> <li><i>Parity Technology Limited</i> is registered in England and Wales under company number <code>09760015</code> and complies with the Data Protection Act 1998 (UK). You may contact us via email at <a href={ 'mailto:admin@parity.io' }>admin@parity.io</a>. Our general privacy policy can be found here: <a href={ 'https://parity.io/legal.html' }>https://parity.io/legal.html</a>.</li>
</ul> </ul>
); );

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
import { stringify } from 'qs'; import { stringify } from 'qs';
import { apiLink } from './links';
const options = { const options = {
method: 'GET', method: 'GET',
@ -24,31 +25,11 @@ const options = {
}; };
export function call (module, action, _params, test, netVersion) { export function call (module, action, _params, test, netVersion) {
let prefix = 'api.';
switch (netVersion) {
case '2':
case '3':
prefix = 'testnet.';
break;
case '42':
prefix = 'kovan.';
break;
case '0':
default:
if (test) {
prefix = 'testnet.';
}
break;
}
const query = stringify(Object.assign({ const query = stringify(Object.assign({
module, action module, action
}, _params || {})); }, _params || {}));
return fetch(`https://${prefix}etherscan.io/api?${query}`, options) return fetch(apiLink(query, test, netVersion), options)
.then((response) => { .then((response) => {
if (!response.ok) { if (!response.ok) {
throw { code: response.status, message: response.statusText }; // eslint-disable-line throw { code: response.status, message: response.statusText }; // eslint-disable-line

Some files were not shown because too many files have changed in this diff Show More