Compare commits
108 Commits
| Author | SHA1 | Date |
| --- | --- | --- |
| | 7919629f3d | |
| | e90c43309a | |
| | c48a69684f | |
| | ca436e87a7 | |
| | 014d0ac160 | |
| | 302dbd5f05 | |
| | 57746382d3 | |
| | df9297762d | |
| | 651cb39797 | |
| | 440e4aea51 | |
| | 8767799394 | |
| | 024b00cc3a | |
| | c790d07200 | |
| | e4c174cec3 | |
| | fdacb9b649 | |
| | e6273039c8 | |
| | f1530394b8 | |
| | fc53f27c86 | |
| | 4a67b1b8f7 | |
| | d40d9e1c8c | |
| | d05c8b2de4 | |
| | b60cda6cf8 | |
| | ff821daf1d | |
| | 20421fcb6d | |
| | a295dc8c02 | |
| | 49357a7a9a | |
| | a667de45fe | |
| | ff71e787db | |
| | 0940fe91e5 | |
| | f205490fb0 | |
| | dc8d6d819e | |
| | feef2f8791 | |
| | 3c15a501ca | |
| | b7682dabf4 | |
| | 2acfdfc977 | |
| | a6d732ed14 | |
| | a9a58d230e | |
| | edbff0d34d | |
| | 94defd7f82 | |
| | 362fb1dfed | |
| | caaac78971 | |
| | 1762e82676 | |
| | 6f21a32b2b | |
| | 86f6cea29d | |
| | 3d6670972f | |
| | 804ddfe31e | |
| | 3bfb2fa1aa | |
| | 9d697c5d0a | |
| | 6a29fea23c | |
| | deecf8927c | |
| | acef56b1ea | |
| | 14d29798e3 | |
| | 5fc06c0e24 | |
| | 62be23eef5 | |
| | 57b8efb86a | |
| | 8f841767a4 | |
| | d30c035440 | |
| | 89dc08a5cd | |
| | fa0e2a7449 | |
| | 1de095f3bd | |
| | b60511e3d2 | |
| | af70a681d5 | |
| | 7adfb82076 | |
| | d504ce64e8 | |
| | 0feb0bb6e7 | |
| | 3b5a8d5d69 | |
| | aca9f13d45 | |
| | e09bef98fb | |
| | ceb590a360 | |
| | 75c0db2b15 | |
| | 70b42345c5 | |
| | a42d780d02 | |
| | 582fa8ce45 | |
| | 73be0fb096 | |
| | 627d1a4971 | |
| | a7807106f5 | |
| | 33b39f0725 | |
| | 53ec1141cf | |
| | 145229d46d | |
| | 568dc33a02 | |
| | cf10450108 | |
| | fe779686ca | |
| | 58c1dbe322 | |
| | 14b578832d | |
| | e961398393 | |
| | 0fad2a6d8c | |
| | f3bcada7b9 | |
| | b814f1ccbf | |
| | cad91df2b8 | |
| | 50a58e1ae8 | |
| | 1e36fc5d0f | |
| | fa6a0a6b60 | |
| | a8fc42d282 | |
| | c6685a7f57 | |
| | 736a8c40f0 | |
| | 5f74f8c265 | |
| | 97ed569588 | |
| | 6766ef988d | |
| | 8a87cfb893 | |
| | 54aebdcb45 | |
| | 86a6145d76 | |
| | 718020b64b | |
| | 8c36a56365 | |
| | 7bccaa5c15 | |
| | 98ec46fff6 | |
| | 8dc584ece9 | |
| | 63d154dad3 | |
| | 0030bb4f1d | |
@@ -9,7 +9,7 @@ trim_trailing_whitespace=true
max_line_length=120
insert_final_newline=true

[.travis.yml]
[*.{yml,sh}]
indent_style=space
indent_size=2
tab_width=8
557  .gitlab-ci.yml
@@ -4,15 +4,17 @@ stages:
|
||||
- push-release
|
||||
- build
|
||||
variables:
|
||||
SIMPLECOV: "true"
|
||||
RUST_BACKTRACE: "1"
|
||||
RUSTFLAGS: ""
|
||||
CARGOFLAGS: ""
|
||||
CI_SERVER_NAME: "GitLab CI"
|
||||
LIBSSL: "libssl1.0.0 (>=1.0.0)"
|
||||
cache:
|
||||
key: "$CI_BUILD_STAGE/$CI_BUILD_REF_NAME"
|
||||
key: "$CI_BUILD_STAGE-$CI_BUILD_REF_NAME"
|
||||
paths:
|
||||
- target/
|
||||
untracked: true
|
||||
linux-stable:
|
||||
linux-ubuntu:
|
||||
stage: build
|
||||
image: parity/rust:gitlab-ci
|
||||
only:
|
||||
@@ -22,78 +24,15 @@ linux-stable:
|
||||
- triggers
|
||||
script:
|
||||
- rustup default stable
|
||||
- cargo build -j $(nproc) --release --features final $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --release -p evmbin
|
||||
- cargo build -j $(nproc) --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --release -p ethkey-cli
|
||||
- strip target/release/parity
|
||||
- strip target/release/parity-evm
|
||||
- strip target/release/ethstore
|
||||
- strip target/release/ethkey
|
||||
- export SHA3=$(target/release/parity tools hash target/release/parity)
|
||||
- md5sum target/release/parity > parity.md5
|
||||
- sh scripts/deb-build.sh amd64
|
||||
- cp target/release/parity deb/usr/bin/parity
|
||||
- cp target/release/parity-evm deb/usr/bin/parity-evm
|
||||
- cp target/release/ethstore deb/usr/bin/ethstore
|
||||
- cp target/release/ethkey deb/usr/bin/ethkey
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- dpkg-deb -b deb "parity_"$VER"_amd64.deb"
|
||||
- md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/"parity_"$VER"_amd64.deb" --body "parity_"$VER"_amd64.deb"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/"parity_"$VER"_amd64.deb.md5" --body "parity_"$VER"_amd64.deb.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu
|
||||
# ARGUMENTS: 1. BUILD_PLATFORM (target for binaries) 2. PLATFORM (target for cargo) 3. ARC (architecture) 4. & 5. CC & CXX flags 6. binary identifier
|
||||
- scripts/gitlab-build.sh x86_64-unknown-linux-gnu x86_64-unknown-linux-gnu amd64 gcc g++ ubuntu
|
||||
tags:
|
||||
- rust
|
||||
- rust-stable
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity
|
||||
- target/release/parity-evm
|
||||
- target/release/ethstore
|
||||
- target/release/ethkey
|
||||
- parity.zip
|
||||
name: "stable-x86_64-unknown-linux-gnu_parity"
|
||||
linux-snap:
|
||||
stage: build
|
||||
image: parity/snapcraft:gitlab-ci
|
||||
only:
|
||||
- snap
|
||||
- beta
|
||||
- tags
|
||||
- triggers
|
||||
script:
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- cd snap
|
||||
- rm -rf *snap
|
||||
- sed -i 's/master/'"$VER"'/g' snapcraft.yaml
|
||||
- echo "Version:"$VER
|
||||
- snapcraft
|
||||
- ls
|
||||
- cp "parity_"$CI_BUILD"_REF_NAME_amd64.snap" "parity_"$VER"_amd64.snap"
|
||||
- md5sum "parity_"$VER"_amd64.snap" > "parity_"$VER"_amd64.snap.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/"parity_"$VER"_amd64.snap" --body "parity_"$VER"_amd64.snap"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/"parity_"$VER"_amd64.snap.md5" --body "parity_"$VER"_amd64.snap.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu
|
||||
tags:
|
||||
- rust
|
||||
- rust-stable
|
||||
artifacts:
|
||||
paths:
|
||||
- scripts/parity_*_amd64.snap
|
||||
name: "stable-x86_64-unknown-snap-gnu_parity"
|
||||
allow_failure: true
|
||||
linux-stable-debian:
|
||||
linux-debian:
|
||||
stage: build
|
||||
image: parity/rust-debian:gitlab-ci
|
||||
only:
|
||||
@@ -102,81 +41,14 @@ linux-stable-debian:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- cargo build -j $(nproc) --release --features final $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --release -p evmbin
|
||||
- cargo build -j $(nproc) --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --release -p ethkey-cli
|
||||
- strip target/release/parity
|
||||
- strip target/release/parity-evm
|
||||
- strip target/release/ethstore
|
||||
- strip target/release/ethkey
|
||||
- export SHA3=$(target/release/parity tools hash target/release/parity)
|
||||
- md5sum target/release/parity > parity.md5
|
||||
- sh scripts/deb-build.sh amd64
|
||||
- cp target/release/parity deb/usr/bin/parity
|
||||
- cp target/release/parity-evm deb/usr/bin/parity-evm
|
||||
- cp target/release/ethstore deb/usr/bin/ethstore
|
||||
- cp target/release/ethkey deb/usr/bin/ethkey
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- dpkg-deb -b deb "parity_"$VER"_amd64.deb"
|
||||
- md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/parity --body target/release/parity
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/"parity_"$VER"_amd64.deb" --body "parity_"$VER"_amd64.deb"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu/"parity_"$VER"_amd64.deb.md5" --body "parity_"$VER"_amd64.deb.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/x86_64-unknown-debian-gnu
|
||||
- export LIBSSL="libssl1.1 (>=1.1.0)"
|
||||
- scripts/gitlab-build.sh x86_64-unknown-debian-gnu x86_64-unknown-linux-gnu amd64 gcc g++ debian
|
||||
tags:
|
||||
- rust
|
||||
- rust-debian
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity
|
||||
- parity.zip
|
||||
name: "stable-x86_64-unknown-debian-gnu_parity"
|
||||
linux-beta:
|
||||
stage: build
|
||||
image: parity/rust:gitlab-ci
|
||||
only:
|
||||
- beta
|
||||
- tags
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- rustup default beta
|
||||
- cargo build -j $(nproc) --release $CARGOFLAGS
|
||||
- strip target/release/parity
|
||||
tags:
|
||||
- rust
|
||||
- rust-beta
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity
|
||||
name: "beta-x86_64-unknown-linux-gnu_parity"
|
||||
allow_failure: true
|
||||
linux-nightly:
|
||||
stage: build
|
||||
image: parity/rust:gitlab-ci
|
||||
only:
|
||||
- beta
|
||||
- tags
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- rustup default nightly
|
||||
- cargo build -j $(nproc) --release $CARGOFLAGS
|
||||
- strip target/release/parity
|
||||
tags:
|
||||
- rust
|
||||
- rust-nightly
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity
|
||||
name: "nigthly-x86_64-unknown-linux-gnu_parity"
|
||||
allow_failure: true
|
||||
linux-centos:
|
||||
stage: build
|
||||
image: parity/rust-centos:gitlab-ci
|
||||
@@ -186,42 +58,12 @@ linux-centos:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- export CXX="g++"
|
||||
- export CC="gcc"
|
||||
- export PLATFORM=x86_64-unknown-centos-gnu
|
||||
- cargo build -j $(nproc) --release --features final $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --release -p evmbin
|
||||
- cargo build -j $(nproc) --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --release -p ethkey-cli
|
||||
- strip target/release/parity
|
||||
- strip target/release/parity-evm
|
||||
- strip target/release/ethstore
|
||||
- strip target/release/ethkey
|
||||
- md5sum target/release/parity > parity.md5
|
||||
- md5sum target/release/parity-evm > parity-evm.md5
|
||||
- md5sum target/release/ethstore > ethstore.md5
|
||||
- md5sum target/release/ethkey > ethkey.md5
|
||||
- export SHA3=$(target/release/parity tools hash target/release/parity)
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity-evm --body target/release/parity-evm
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity-evm.md5 --body parity-evm.md5
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/ethstore --body target/release/ethstore
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/ethstore.md5 --body ethstore.md5
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/ethkey --body target/release/ethkey
|
||||
- aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/ethkey.md5 --body ethkey.md5
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- scripts/gitlab-build.sh x86_64-unknown-centos-gnu x86_64-unknown-linux-gnu x86_64 gcc g++ centos
|
||||
tags:
|
||||
- rust
|
||||
- rust-centos
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity
|
||||
- parity.zip
|
||||
name: "x86_64-unknown-centos-gnu_parity"
|
||||
linux-i686:
|
||||
stage: build
|
||||
@@ -232,45 +74,12 @@ linux-i686:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- export HOST_CC=gcc
|
||||
- export HOST_CXX=g++
|
||||
- export COMMIT=$(git rev-parse HEAD)
|
||||
- export PLATFORM=i686-unknown-linux-gnu
|
||||
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p evmbin
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethkey-cli
|
||||
- strip target/$PLATFORM/release/parity
|
||||
- strip target/$PLATFORM/release/parity-evm
|
||||
- strip target/$PLATFORM/release/ethstore
|
||||
- strip target/$PLATFORM/release/ethkey
|
||||
- strip target/$PLATFORM/release/parity
|
||||
- md5sum target/$PLATFORM/release/parity > parity.md5
|
||||
- export SHA3=$(target/$PLATFORM/release/parity tools hash target/$PLATFORM/release/parity)
|
||||
- sh scripts/deb-build.sh i386
|
||||
- cp target/$PLATFORM/release/parity deb/usr/bin/parity
|
||||
- cp target/$PLATFORM/release/parity-evm deb/usr/bin/parity-evm
|
||||
- cp target/$PLATFORM/release/ethstore deb/usr/bin/ethstore
|
||||
- cp target/$PLATFORM/release/ethkey deb/usr/bin/ethkey
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- dpkg-deb -b deb "parity_"$VER"_i386.deb"
|
||||
- md5sum "parity_"$VER"_i386.deb" > "parity_"$VER"_i386.deb.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_i386.deb" --body "parity_"$VER"_i386.deb"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_i386.deb.md5" --body "parity_"$VER"_i386.deb.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- scripts/gitlab-build.sh i686-unknown-linux-gnu i686-unknown-linux-gnu i386 gcc g++ ubuntu
|
||||
tags:
|
||||
- rust
|
||||
- rust-i686
|
||||
artifacts:
|
||||
paths:
|
||||
- target/i686-unknown-linux-gnu/release/parity
|
||||
- parity.zip
|
||||
name: "i686-unknown-linux-gnu"
|
||||
allow_failure: true
|
||||
linux-armv7:
|
||||
@@ -282,53 +91,12 @@ linux-armv7:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- export CC=arm-linux-gnueabihf-gcc
|
||||
- export CXX=arm-linux-gnueabihf-g++
|
||||
- export HOST_CC=gcc
|
||||
- export HOST_CXX=g++
|
||||
- export PLATFORM=armv7-unknown-linux-gnueabihf
|
||||
- rm -rf .cargo
|
||||
- mkdir -p .cargo
|
||||
- echo "[target.$PLATFORM]" >> .cargo/config
|
||||
- echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
|
||||
- cat .cargo/config
|
||||
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p evmbin
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethkey-cli
|
||||
- md5sum target/$PLATFORM/release/parity > parity.md5
|
||||
- export SHA3=$(target/$PLATFORM/release/parity tools hash target/$PLATFORM/release/parity)
|
||||
- sh scripts/deb-build.sh i386
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/parity
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/parity-evm
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/ethstore
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/ethkey
|
||||
- export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
|
||||
- md5sum target/$PLATFORM/release/parity > parity.md5
|
||||
- sh scripts/deb-build.sh armhf
|
||||
- cp target/$PLATFORM/release/parity deb/usr/bin/parity
|
||||
- cp target/$PLATFORM/release/parity-evm deb/usr/bin/parity-evm
|
||||
- cp target/$PLATFORM/release/ethstore deb/usr/bin/ethstore
|
||||
- cp target/$PLATFORM/release/ethkey deb/usr/bin/ethkey
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- dpkg-deb -b deb "parity_"$VER"_armhf.deb"
|
||||
- md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_armhf.deb" --body "parity_"$VER"_armhf.deb"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_armhf.deb.md5" --body "parity_"$VER"_armhf.deb.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- scripts/gitlab-build.sh armv7-unknown-linux-gnueabihf armv7-unknown-linux-gnueabihf armhf arm-linux-gnueabihf-gcc arm-linux-gnueabihf-g++ ubuntu
|
||||
tags:
|
||||
- rust
|
||||
- rust-arm
|
||||
artifacts:
|
||||
paths:
|
||||
- target/armv7-unknown-linux-gnueabihf/release/parity
|
||||
- parity.zip
|
||||
name: "armv7_unknown_linux_gnueabihf_parity"
|
||||
allow_failure: true
|
||||
linux-arm:
|
||||
@@ -340,50 +108,12 @@ linux-arm:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- export CC=arm-linux-gnueabihf-gcc
|
||||
- export CXX=arm-linux-gnueabihf-g++
|
||||
- export HOST_CC=gcc
|
||||
- export HOST_CXX=g++
|
||||
- export PLATFORM=arm-unknown-linux-gnueabihf
|
||||
- rm -rf .cargo
|
||||
- mkdir -p .cargo
|
||||
- echo "[target.$PLATFORM]" >> .cargo/config
|
||||
- echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
|
||||
- cat .cargo/config
|
||||
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p evmbin
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethkey-cli
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/parity
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/parity-evm
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/ethstore
|
||||
- arm-linux-gnueabihf-strip target/$PLATFORM/release/ethkey
|
||||
- export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
|
||||
- md5sum target/$PLATFORM/release/parity > parity.md5
|
||||
- sh scripts/deb-build.sh armhf
|
||||
- cp target/$PLATFORM/release/parity deb/usr/bin/parity
|
||||
- cp target/$PLATFORM/release/parity-evm deb/usr/bin/parity-evm
|
||||
- cp target/$PLATFORM/release/ethstore deb/usr/bin/ethstore
|
||||
- cp target/$PLATFORM/release/ethkey deb/usr/bin/ethkey
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- dpkg-deb -b deb "parity_"$VER"_armhf.deb"
|
||||
- md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/$PLATFORM/release/parity
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_armhf.deb" --body "parity_"$VER"_armhf.deb"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_armhf.deb.md5" --body "parity_"$VER"_armhf.deb.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- scripts/gitlab-build.sh arm-unknown-linux-gnueabihf arm-unknown-linux-gnueabihf armhf arm-linux-gnueabihf-gcc arm-linux-gnueabihf-g++ ubuntu
|
||||
tags:
|
||||
- rust
|
||||
- rust-arm
|
||||
artifacts:
|
||||
paths:
|
||||
- target/arm-unknown-linux-gnueabihf/release/parity
|
||||
- parity.zip
|
||||
name: "arm-unknown-linux-gnueabihf_parity"
|
||||
allow_failure: true
|
||||
linux-aarch64:
|
||||
@@ -395,50 +125,29 @@ linux-aarch64:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- export CC=aarch64-linux-gnu-gcc
|
||||
- export CXX=aarch64-linux-gnu-g++
|
||||
- export HOST_CC=gcc
|
||||
- export HOST_CXX=g++
|
||||
- export PLATFORM=aarch64-unknown-linux-gnu
|
||||
- rm -rf .cargo
|
||||
- mkdir -p .cargo
|
||||
- echo "[target.$PLATFORM]" >> .cargo/config
|
||||
- echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config
|
||||
- cat .cargo/config
|
||||
- cargo build -j $(nproc) --target $PLATFORM --features final --release $CARGOFLAGS
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p evmbin
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethstore-cli
|
||||
- cargo build -j $(nproc) --target $PLATFORM --release -p ethkey-cli
|
||||
- aarch64-linux-gnu-strip target/$PLATFORM/release/parity
|
||||
- aarch64-linux-gnu-strip target/$PLATFORM/release/parity-evm
|
||||
- aarch64-linux-gnu-strip target/$PLATFORM/release/ethstore
|
||||
- aarch64-linux-gnu-strip target/$PLATFORM/release/ethkey
|
||||
- export SHA3=$(rhash --sha3-256 target/$PLATFORM/release/parity -p %h)
|
||||
- md5sum target/$PLATFORM/release/parity > parity.md5
|
||||
- sh scripts/deb-build.sh arm64
|
||||
- cp target/$PLATFORM/release/parity deb/usr/bin/parity
|
||||
- cp target/$PLATFORM/release/parity-evm deb/usr/bin/parity-evm
|
||||
- cp target/$PLATFORM/release/ethstore deb/usr/bin/ethstore
|
||||
- cp target/$PLATFORM/release/ethkey deb/usr/bin/ethkey
|
||||
- export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
- dpkg-deb -b deb "parity_"$VER"_arm64.deb"
|
||||
- md5sum "parity_"$VER"_arm64.deb" > "parity_"$VER"_arm64.deb.md5"
|
||||
- aws configure set aws_access_key_id $s3_key
|
||||
- aws configure set aws_secret_access_key $s3_secret
|
||||
- if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
- aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_arm64.deb" --body "parity_"$VER"_arm64.deb"
|
||||
- aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity_"$VER"_arm64.deb.md5" --body "parity_"$VER"_arm64.deb.md5"
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
- scripts/gitlab-build.sh aarch64-unknown-linux-gnu aarch64-unknown-linux-gnu arm64 aarch64-linux-gnu-gcc aarch64-linux-gnu-g++ ubuntu
|
||||
tags:
|
||||
- rust
|
||||
- rust-arm
|
||||
artifacts:
|
||||
paths:
|
||||
- target/aarch64-unknown-linux-gnu/release/parity
|
||||
- parity.zip
|
||||
name: "aarch64-unknown-linux-gnu_parity"
|
||||
linux-snap:
|
||||
stage: build
|
||||
image: parity/snapcraft:gitlab-ci
|
||||
only:
|
||||
- stable
|
||||
- beta
|
||||
- tags
|
||||
- triggers
|
||||
script:
|
||||
- scripts/gitlab-build.sh x86_64-unknown-snap-gnu x86_64-unknown-linux-gnu amd64 gcc g++ snap
|
||||
tags:
|
||||
- rust-stable
|
||||
artifacts:
|
||||
paths:
|
||||
- parity.zip
|
||||
name: "stable-x86_64-unknown-snap-gnu_parity"
|
||||
allow_failure: true
|
||||
darwin:
|
||||
stage: build
|
||||
@@ -447,45 +156,17 @@ darwin:
|
||||
- tags
|
||||
- stable
|
||||
- triggers
|
||||
script: |
|
||||
export COMMIT=$(git rev-parse HEAD)
|
||||
export PLATFORM=x86_64-apple-darwin
|
||||
rustup default stable
|
||||
cargo clean
|
||||
cargo build -j 8 --features final --release #$CARGOFLAGS
|
||||
cargo build -j 8 --release -p ethstore-cli #$CARGOFLAGS
|
||||
cargo build -j 8 --release -p ethkey-cli #$CARGOFLAGS
|
||||
cargo build -j 8 --release -p evmbin #$CARGOFLAGS
|
||||
rm -rf parity.md5
|
||||
md5sum target/release/parity > parity.md5
|
||||
export SHA3=$(target/release/parity tools hash target/release/parity)
|
||||
cd mac
|
||||
xcodebuild -configuration Release
|
||||
cd ..
|
||||
packagesbuild -v mac/Parity.pkgproj
|
||||
productsign --sign 'Developer ID Installer: PARITY TECHNOLOGIES LIMITED (P2PX3JU8FT)' target/release/Parity\ Ethereum.pkg target/release/Parity\ Ethereum-signed.pkg
|
||||
export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
|
||||
mv target/release/Parity\ Ethereum-signed.pkg "parity-"$VER"-macos-installer.pkg"
|
||||
md5sum "parity-"$VER"-macos-installer.pkg" >> "parity-"$VER"-macos-installer.pkg.md5"
|
||||
aws configure set aws_access_key_id $s3_key
|
||||
aws configure set aws_secret_access_key $s3_secret
|
||||
if [[ $CI_BUILD_REF_NAME =~ ^(master|beta|stable|nightly)$ ]]; then export S3_BUCKET=builds-parity-published; else export S3_BUCKET=builds-parity; fi
|
||||
aws s3 rm --recursive s3://$S3_BUCKET/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity --body target/release/parity
|
||||
aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/parity.md5 --body parity.md5
|
||||
aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity-"$VER"-macos-installer.pkg" --body "parity-"$VER"-macos-installer.pkg"
|
||||
aws s3api put-object --bucket $S3_BUCKET --key $CI_BUILD_REF_NAME/$PLATFORM/"parity-"$VER"-macos-installer.pkg.md5" --body "parity-"$VER"-macos-installer.pkg.md5"
|
||||
curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1337/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
curl --data "commit=$CI_BUILD_REF&sha3=$SHA3&filename=parity&secret=$RELEASES_SECRET" http://update.parity.io:1338/push-build/$CI_BUILD_REF_NAME/$PLATFORM
|
||||
script:
|
||||
- scripts/gitlab-build.sh x86_64-apple-darwin x86_64-apple-darwin macos gcc g++ macos
|
||||
tags:
|
||||
- osx
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity
|
||||
- parity.zip
|
||||
name: "x86_64-apple-darwin_parity"
|
||||
windows:
|
||||
cache:
|
||||
key: "%CI_BUILD_STAGE%/%CI_BUILD_REF_NAME%"
|
||||
key: "%CI_BUILD_STAGE%-%CI_BUILD_REF_NAME%"
|
||||
untracked: true
|
||||
stage: build
|
||||
only:
|
||||
@@ -494,62 +175,12 @@ windows:
|
||||
- stable
|
||||
- triggers
|
||||
script:
|
||||
- set PLATFORM=x86_64-pc-windows-msvc
|
||||
- set INCLUDE=C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Include;C:\vs2015\VC\include;C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt
|
||||
- set LIB=C:\vs2015\VC\lib;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64
|
||||
- set RUST_BACKTRACE=1
|
||||
- set RUSTFLAGS=%RUSTFLAGS%
|
||||
- rustup default stable-x86_64-pc-windows-msvc
|
||||
- cargo clean
|
||||
- cargo build --features final --release #%CARGOFLAGS%
|
||||
- cargo build --release -p ethstore-cli #%CARGOFLAGS%
|
||||
- cargo build --release -p ethkey-cli #%CARGOFLAGS%
|
||||
- cargo build --release -p evmbin #%CARGOFLAGS%
|
||||
- signtool sign /f %keyfile% /p %certpass% target\release\parity.exe
|
||||
- target\release\parity.exe tools hash target\release\parity.exe > parity.sha3
|
||||
- set /P SHA3=<parity.sha3
|
||||
- curl -sL --url "https://github.com/paritytech/win-build/raw/master/SimpleFC.dll" -o nsis\SimpleFC.dll
|
||||
- curl -sL --url "https://github.com/paritytech/win-build/raw/master/vc_redist.x64.exe" -o nsis\vc_redist.x64.exe
|
||||
- msbuild windows\ptray\ptray.vcxproj /p:Platform=x64 /p:Configuration=Release
|
||||
- signtool sign /f %keyfile% /p %certpass% windows\ptray\x64\release\ptray.exe
|
||||
- cd nsis
|
||||
- makensis.exe installer.nsi
|
||||
- copy installer.exe InstallParity.exe
|
||||
- signtool sign /f %keyfile% /p %certpass% InstallParity.exe
|
||||
- md5sums InstallParity.exe > InstallParity.exe.md5
|
||||
- zip win-installer.zip InstallParity.exe InstallParity.exe.md5
|
||||
- md5sums win-installer.zip > win-installer.zip.md5
|
||||
- cd ..\target\release\
|
||||
- md5sums parity.exe > parity.exe.md5
|
||||
- zip parity.zip parity.exe parity.md5
|
||||
- md5sums parity.zip > parity.zip.md5
|
||||
- cd ..\..
|
||||
- aws configure set aws_access_key_id %s3_key%
|
||||
- aws configure set aws_secret_access_key %s3_secret%
|
||||
- echo %CI_BUILD_REF_NAME%
|
||||
- echo %CI_BUILD_REF_NAME% | findstr /R "master" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
|
||||
- echo %CI_BUILD_REF_NAME% | findstr /R "beta" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
|
||||
- echo %CI_BUILD_REF_NAME% | findstr /R "stable" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
|
||||
- echo %CI_BUILD_REF_NAME% | findstr /R "nightly" >nul 2>&1 && set S3_BUCKET=builds-parity-published|| set S3_BUCKET=builds-parity
|
||||
- echo %S3_BUCKET%
|
||||
- aws s3 rm --recursive s3://%S3_BUCKET%/%CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.exe --body target\release\parity.exe
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.exe.md5 --body target\release\parity.exe.md5
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.zip --body target\release\parity.zip
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity.zip.md5 --body target\release\parity.zip.md5
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/InstallParity.exe --body nsis\InstallParity.exe
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/InstallParity.exe.md5 --body nsis\InstallParity.exe.md5
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/win-installer.zip --body nsis\win-installer.zip
|
||||
- aws s3api put-object --bucket %S3_BUCKET% --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/win-installer.zip.md5 --body nsis\win-installer.zip.md5
|
||||
- curl --data "commit=%CI_BUILD_REF%&sha3=%SHA3%&filename=parity.exe&secret=%RELEASES_SECRET%" http://update.parity.io:1337/push-build/%CI_BUILD_REF_NAME%/%PLATFORM%
|
||||
- curl --data "commit=%CI_BUILD_REF%&sha3=%SHA3%&filename=parity.exe&secret=%RELEASES_SECRET%" http://update.parity.io:1338/push-build/%CI_BUILD_REF_NAME%/%PLATFORM%
|
||||
- sh scripts/gitlab-build.sh x86_64-pc-windows-msvc x86_64-pc-windows-msvc installer "" "" windows
|
||||
tags:
|
||||
- rust-windows
|
||||
artifacts:
|
||||
paths:
|
||||
- target/release/parity.exe
|
||||
- target/release/parity.pdb
|
||||
- nsis/InstallParity.exe
|
||||
- parity.zip
|
||||
name: "x86_64-pc-windows-msvc_parity"
|
||||
docker-build:
|
||||
stage: build
|
||||
@@ -562,7 +193,7 @@ docker-build:
|
||||
- if [ "$CI_BUILD_REF_NAME" == "beta-release" ]; then DOCKER_TAG="latest"; else DOCKER_TAG=$CI_BUILD_REF_NAME; fi
|
||||
- echo "Tag:" $DOCKER_TAG
|
||||
- docker login -u $Docker_Hub_User_Parity -p $Docker_Hub_Pass_Parity
|
||||
- sh scripts/docker-build.sh $DOCKER_TAG
|
||||
- scripts/docker-build.sh $DOCKER_TAG
|
||||
- docker logout
|
||||
tags:
|
||||
- docker
|
||||
@@ -571,63 +202,16 @@ test-coverage:
|
||||
only:
|
||||
- master
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- rm -rf target/*
|
||||
- rm -rf js/.coverage
|
||||
- scripts/cov.sh
|
||||
# - COVERAGE=$(grep -Po 'covered":.*?[^\\]"' target/cov/index.json | grep "[0-9]*\.[0-9]" -o)
|
||||
# - echo "Coverage:" $COVERAGE
|
||||
- scripts/gitlab-test.sh test-coverage
|
||||
tags:
|
||||
- kcov
|
||||
allow_failure: true
|
||||
test-darwin:
|
||||
stage: test
|
||||
only:
|
||||
- triggers
|
||||
variables:
|
||||
RUST_BACKTRACE: 1
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
|
||||
tags:
|
||||
- osx
|
||||
allow_failure: true
|
||||
test-windows:
|
||||
stage: test
|
||||
only:
|
||||
- triggers
|
||||
variables:
|
||||
RUST_BACKTRACE: 1
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- echo cargo test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p parity-dapps -p parity-rpc -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity-rpc-client -p parity %CARGOFLAGS% --verbose --release
|
||||
tags:
|
||||
- rust-windows
|
||||
allow_failure: true
|
||||
test-rust-stable:
|
||||
stage: test
|
||||
image: parity/rust:gitlab-ci
|
||||
variables:
|
||||
RUST_BACKTRACE: 1
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- rustup show
|
||||
- if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
|
||||
- if [ "$CI_BUILD_REF_NAME" == "nightly" ]; then sh scripts/aura-test.sh; fi
|
||||
- scripts/gitlab-test.sh stable
|
||||
tags:
|
||||
- rust
|
||||
- rust-stable
|
||||
js-test:
|
||||
stage: test
|
||||
image: parity/rust:gitlab-ci
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi
|
||||
- if [ $JS_OLD_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS (old) deps install since no JS files modified."; else ./js-old/scripts/install-deps.sh;fi
|
||||
- if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS lint since no JS files modified."; else ./js/scripts/lint.sh && ./js/scripts/test.sh && ./js/scripts/build.sh; fi
|
||||
- if [ $JS_OLD_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS (old) lint since no JS files modified."; else ./js-old/scripts/lint.sh && ./js-old/scripts/test.sh && ./js-old/scripts/build.sh; fi
|
||||
tags:
|
||||
- rust
|
||||
- rust-stable
|
||||
test-rust-beta:
|
||||
stage: test
|
||||
@@ -635,14 +219,9 @@ test-rust-beta:
|
||||
- triggers
|
||||
- master
|
||||
image: parity/rust:gitlab-ci
|
||||
variables:
|
||||
RUST_BACKTRACE: 1
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- rustup default beta
|
||||
- if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
|
||||
- scripts/gitlab-test.sh beta
|
||||
tags:
|
||||
- rust
|
||||
- rust-beta
|
||||
allow_failure: true
|
||||
test-rust-nightly:
|
||||
@@ -651,36 +230,30 @@ test-rust-nightly:
|
||||
- triggers
|
||||
- master
|
||||
image: parity/rust:gitlab-ci
|
||||
variables:
|
||||
RUST_BACKTRACE: 1
|
||||
script:
|
||||
- git submodule update --init --recursive
|
||||
- rustup default nightly
|
||||
- if [ $RUST_FILES_MODIFIED -eq 0 ]; then echo "Skipping Rust tests since no Rust files modified."; else ./test.sh $CARGOFLAGS; fi
|
||||
- scripts/gitlab-test.sh nightly
|
||||
tags:
|
||||
- rust
|
||||
- rust-nightly
|
||||
allow_failure: true
|
||||
js-test:
|
||||
stage: test
|
||||
image: parity/rust:gitlab-ci
|
||||
script:
|
||||
- scripts/gitlab-test.sh js-test
|
||||
tags:
|
||||
- rust-stable
|
||||
js-release:
|
||||
stage: js-build
|
||||
only:
|
||||
- master
|
||||
- beta
|
||||
- stable
|
||||
- beta
|
||||
- tags
|
||||
- triggers
|
||||
image: parity/rust:gitlab-ci
|
||||
script:
|
||||
- rustup default stable
|
||||
- echo $JS_FILES_MODIFIED
|
||||
- if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS deps install since no JS files modified."; else ./js/scripts/install-deps.sh;fi
|
||||
- if [ $JS_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS rebuild since no JS files modified."; else ./js/scripts/build.sh && ./js/scripts/push-precompiled.sh; fi
|
||||
|
||||
- echo $JS_OLD_FILES_MODIFIED
|
||||
- if [ $JS_OLD_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS (old) deps install since no JS files modified."; else ./js-old/scripts/install-deps.sh;fi
|
||||
- if [ $JS_OLD_FILES_MODIFIED -eq 0 ]; then echo "Skipping JS (old) rebuild since no JS files modified."; else ./js-old/scripts/build.sh && ./js-old/scripts/push-precompiled.sh; fi
|
||||
|
||||
- if [ $JS_FILES_MODIFIED -eq 0 ] && [ $JS_OLD_FILES_MODIFIED -eq 0 ]; then echo "Skipping Cargo update since no JS files modified."; else ./js/scripts/push-cargo.sh; fi
|
||||
- scripts/gitlab-test.sh js-release
|
||||
tags:
|
||||
- javascript
|
||||
push-release:
|
||||
@@ -690,18 +263,6 @@ push-release:
|
||||
- triggers
|
||||
image: parity/rust:gitlab-ci
|
||||
script:
|
||||
- rustup default stable
|
||||
- curl --data "secret=$RELEASES_SECRET" http://update.parity.io:1337/push-release/$CI_BUILD_REF_NAME/$CI_BUILD_REF
|
||||
- curl --data "secret=$RELEASES_SECRET" http://update.parity.io:1338/push-release/$CI_BUILD_REF_NAME/$CI_BUILD_REF
|
||||
- scripts/gitlab-push-release.sh
|
||||
tags:
|
||||
- curl
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
.functions: &functions |
|
||||
export JS_FILES_MODIFIED=$(git --no-pager diff --name-only master...$CI_BUILD_REF | grep ^js/ | wc -l)
|
||||
export JS_OLD_FILES_MODIFIED=$(git --no-pager diff --name-only master...$CI_BUILD_REF | grep ^js-old/ | wc -l)
|
||||
export RUST_FILES_MODIFIED=$(git --no-pager diff --name-only master...$CI_BUILD_REF | grep -v -e ^js -e ^\\. -e ^LICENSE -e ^README.md -e ^test.sh -e ^windows/ -e ^scripts/ -e^mac/ -e ^nsis/ | wc -l)
|
||||
|
||||
before_script:
|
||||
- *functions
|
||||
|
||||
572  Cargo.lock (generated)
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
[package]
description = "Parity Ethereum client"
name = "parity"
version = "1.9.0"
version = "1.9.7"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"
@@ -12,6 +12,8 @@ env_logger = "0.4"
rustc-hex = "1.0"
docopt = "0.8"
clap = "2"
term_size = "0.3"
textwrap = "0.9"
time = "0.1"
num_cpus = "1.2"
number_prefix = "0.2"
@@ -25,7 +27,6 @@ toml = "0.4"
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
app_dirs = "1.1.1"
futures = "0.1"
futures-cpupool = "0.1"
fdlimit = "0.1"
@@ -1,4 +1,4 @@
# [Parity](https://parity.io/) - fast, light, and robust Ethereum client
# [Parity](https://parity.io/) - fast, light, and robust Ethereum client

[](https://gitlab.parity.io/parity/parity/commits/master)
[](https://build.snapcraft.io/user/paritytech/parity)
@@ -14,12 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use endpoint::EndpointInfo;

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct App {
	pub id: String,
	pub id: Option<String>,
	pub name: String,
	pub description: String,
	pub version: String,
@@ -28,32 +26,14 @@ pub struct App {
	pub icon_url: String,
	#[serde(rename="localUrl")]
	pub local_url: Option<String>,
	#[serde(rename="allowJsEval")]
	pub allow_js_eval: Option<bool>,
}

impl App {
	/// Creates `App` instance from `EndpointInfo` and `id`.
	pub fn from_info(id: &str, info: &EndpointInfo) -> Self {
		App {
			id: id.to_owned(),
			name: info.name.to_owned(),
			description: info.description.to_owned(),
			version: info.version.to_owned(),
			author: info.author.to_owned(),
			icon_url: info.icon_url.to_owned(),
			local_url: info.local_url.to_owned(),
		}
	}
}

impl Into<EndpointInfo> for App {
	fn into(self) -> EndpointInfo {
		EndpointInfo {
			name: self.name,
			description: self.description,
			version: self.version,
			author: self.author,
			icon_url: self.icon_url,
			local_url: self.local_url,
		}
	pub fn with_id(&self, id: &str) -> Self {
		let mut app = self.clone();
		app.id = Some(id.into());
		app
	}
}
@@ -178,7 +178,7 @@ impl ContentValidator for Dapp {
		// First find manifest file
		let (mut manifest, manifest_dir) = Self::find_manifest(&mut zip)?;
		// Overwrite id to match hash
		manifest.id = id;
		manifest.id = Some(id);

		// Unpack zip
		for i in 0..zip.len() {
@@ -319,12 +319,14 @@ mod tests {
|
||||
).allow_dapps(true);
|
||||
|
||||
let handler = local::Dapp::new(pool, path, EndpointInfo {
|
||||
id: None,
|
||||
name: "fake".into(),
|
||||
description: "".into(),
|
||||
version: "".into(),
|
||||
author: "".into(),
|
||||
icon_url: "".into(),
|
||||
local_url: Some("".into()),
|
||||
allow_js_eval: None,
|
||||
}, Default::default(), None);
|
||||
|
||||
// when
|
||||
|
@@ -46,17 +46,18 @@ fn read_manifest(name: &str, mut path: PathBuf) -> EndpointInfo {
			// Try to deserialize manifest
			deserialize_manifest(s)
		})
		.map(Into::into)
		.unwrap_or_else(|e| {
			warn!(target: "dapps", "Cannot read manifest file at: {:?}. Error: {:?}", path, e);

			EndpointInfo {
				id: None,
				name: name.into(),
				description: name.into(),
				version: "0.0.0".into(),
				author: "?".into(),
				icon_url: "icon.png".into(),
				local_url: None,
				allow_js_eval: Some(false),
			}
		})
}
@@ -20,8 +20,13 @@ pub use apps::App as Manifest;
pub const MANIFEST_FILENAME: &'static str = "manifest.json";

pub fn deserialize_manifest(manifest: String) -> Result<Manifest, String> {
	serde_json::from_str::<Manifest>(&manifest).map_err(|e| format!("{:?}", e))
	// TODO [todr] Manifest validation (especialy: id (used as path))
	let mut manifest = serde_json::from_str::<Manifest>(&manifest).map_err(|e| format!("{:?}", e))?;
	if manifest.id.is_none() {
		return Err("App 'id' is missing.".into());
	}
	manifest.allow_js_eval = Some(manifest.allow_js_eval.unwrap_or(false));

	Ok(manifest)
}

pub fn serialize_manifest(manifest: &Manifest) -> Result<String, String> {
@@ -44,7 +44,7 @@ pub const WEB_PATH: &'static str = "web";
|
||||
pub const URL_REFERER: &'static str = "__referer=";
|
||||
|
||||
pub fn utils(pool: CpuPool) -> Box<Endpoint> {
|
||||
Box::new(page::builtin::Dapp::new(pool, parity_ui::App::default()))
|
||||
Box::new(page::builtin::Dapp::new(pool, false, parity_ui::App::default()))
|
||||
}
|
||||
|
||||
pub fn ui(pool: CpuPool) -> Box<Endpoint> {
|
||||
@@ -76,9 +76,9 @@ pub fn all_endpoints<F: Fetch>(
|
||||
}
|
||||
|
||||
// NOTE [ToDr] Dapps will be currently embeded on 8180
|
||||
insert::<parity_ui::App>(&mut pages, "ui", Embeddable::Yes(embeddable.clone()), pool.clone());
|
||||
insert::<parity_ui::App>(&mut pages, "ui", Embeddable::Yes(embeddable.clone()), pool.clone(), true);
|
||||
// old version
|
||||
insert::<parity_ui::old::App>(&mut pages, "v1", Embeddable::Yes(embeddable.clone()), pool.clone());
|
||||
insert::<parity_ui::old::App>(&mut pages, "v1", Embeddable::Yes(embeddable.clone()), pool.clone(), true);
|
||||
|
||||
pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned()));
|
||||
pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), fetch.clone()));
|
||||
@@ -86,10 +86,16 @@ pub fn all_endpoints<F: Fetch>(
|
||||
(local_endpoints, pages)
|
||||
}
|
||||
|
||||
fn insert<T : WebApp + Default + 'static>(pages: &mut Endpoints, id: &str, embed_at: Embeddable, pool: CpuPool) {
|
||||
fn insert<T : WebApp + Default + 'static>(
|
||||
pages: &mut Endpoints,
|
||||
id: &str,
|
||||
embed_at: Embeddable,
|
||||
pool: CpuPool,
|
||||
allow_js_eval: bool,
|
||||
) {
|
||||
pages.insert(id.to_owned(), Box::new(match embed_at {
|
||||
Embeddable::Yes(address) => page::builtin::Dapp::new_safe_to_embed(pool, T::default(), address),
|
||||
Embeddable::No => page::builtin::Dapp::new(pool, T::default()),
|
||||
Embeddable::Yes(address) => page::builtin::Dapp::new_safe_to_embed(pool, allow_js_eval, T::default(), address),
|
||||
Embeddable::No => page::builtin::Dapp::new(pool, allow_js_eval, T::default()),
|
||||
}));
|
||||
}
|
||||
|
||||
|
@@ -37,16 +37,7 @@ impl EndpointPath {
	}
}

#[derive(Debug, PartialEq, Clone)]
pub struct EndpointInfo {
	pub name: String,
	pub description: String,
	pub version: String,
	pub author: String,
	pub icon_url: String,
	pub local_url: Option<String>,
}

pub type EndpointInfo = ::apps::App;
pub type Endpoints = BTreeMap<String, Box<Endpoint>>;
pub type Response = Box<Future<Item=hyper::Response, Error=hyper::Error> + Send>;
pub type Request = hyper::Request;
@@ -82,7 +82,7 @@ impl Into<hyper::Response> for ContentHandler {
|
||||
.with_status(self.code)
|
||||
.with_header(header::ContentType(self.mimetype))
|
||||
.with_body(self.content);
|
||||
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on);
|
||||
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on, false);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
@@ -40,7 +40,7 @@ impl Into<hyper::Response> for EchoHandler {
|
||||
.with_header(content_type.unwrap_or(header::ContentType::json()))
|
||||
.with_body(self.request.body());
|
||||
|
||||
add_security_headers(res.headers_mut(), None);
|
||||
add_security_headers(res.headers_mut(), None, false);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
@@ -36,7 +36,7 @@ use hyper::header;
|
||||
use {apps, address, Embeddable};
|
||||
|
||||
/// Adds security-related headers to the Response.
|
||||
pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embeddable) {
|
||||
pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embeddable, allow_js_eval: bool) {
|
||||
headers.set_raw("X-XSS-Protection", "1; mode=block");
|
||||
headers.set_raw("X-Content-Type-Options", "nosniff");
|
||||
|
||||
@@ -47,6 +47,8 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
|
||||
|
||||
// Content Security Policy headers
|
||||
headers.set_raw("Content-Security-Policy", String::new()
|
||||
// Restrict everything to the same origin by default.
|
||||
+ "default-src 'self';"
|
||||
// Allow connecting to WS servers and HTTP(S) servers.
|
||||
// We could be more restrictive and allow only RPC server URL.
|
||||
+ "connect-src http: https: ws: wss:;"
|
||||
@@ -64,26 +66,29 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
|
||||
+ "style-src 'self' 'unsafe-inline' data: blob: https:;"
|
||||
// Allow fonts from data: and HTTPS.
|
||||
+ "font-src 'self' data: https:;"
|
||||
// Allow inline scripts and scripts eval (webpack/jsconsole)
|
||||
// Disallow objects
|
||||
+ "object-src 'none';"
|
||||
// Allow scripts
|
||||
+ {
|
||||
let script_src = embeddable_on.as_ref()
|
||||
.map(|e| e.extra_script_src.iter()
|
||||
.map(|&(ref host, port)| address(host, port))
|
||||
.join(" ")
|
||||
).unwrap_or_default();
|
||||
let eval = if allow_js_eval { " 'unsafe-eval'" } else { "" };
|
||||
|
||||
&format!(
|
||||
"script-src 'self' 'unsafe-inline' 'unsafe-eval' {};",
|
||||
script_src
|
||||
"script-src 'self' {}{};",
|
||||
script_src,
|
||||
eval
|
||||
)
|
||||
}
|
||||
// Same restrictions as script-src with additional
|
||||
// blob: that is required for camera access (worker)
|
||||
+ "worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;"
|
||||
// Restrict everything else to the same origin.
|
||||
+ "default-src 'self';"
|
||||
+ "worker-src 'self' https: blob:;"
|
||||
// Run in sandbox mode (although it's not fully safe since we allow same-origin and script)
|
||||
+ "sandbox allow-same-origin allow-forms allow-modals allow-popups allow-presentation allow-scripts;"
|
||||
// Disallow subitting forms from any dapps
|
||||
// Disallow submitting forms from any dapps
|
||||
+ "form-action 'none';"
|
||||
// Never allow mixed content
|
||||
+ "block-all-mixed-content;"
|
||||
|
@@ -51,7 +51,7 @@ impl<R: io::Read> StreamingHandler<R> {
|
||||
.with_status(self.status)
|
||||
.with_header(header::ContentType(self.mimetype))
|
||||
.with_body(body);
|
||||
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on);
|
||||
add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on, false);
|
||||
|
||||
(reader, res)
|
||||
}
|
||||
|
@@ -109,7 +109,7 @@ impl Endpoints {
|
||||
/// Returns a current list of app endpoints.
|
||||
pub fn list(&self) -> Vec<apps::App> {
|
||||
self.endpoints.read().iter().filter_map(|(ref k, ref e)| {
|
||||
e.info().map(|ref info| apps::App::from_info(k, info))
|
||||
e.info().map(|ref info| info.with_id(k))
|
||||
}).collect()
|
||||
}
|
||||
|
||||
|
@@ -38,13 +38,14 @@ pub struct Dapp<T: WebApp + 'static> {
|
||||
|
||||
impl<T: WebApp + 'static> Dapp<T> {
|
||||
/// Creates new `Dapp` for builtin (compile time) Dapp.
|
||||
pub fn new(pool: CpuPool, app: T) -> Self {
|
||||
let info = app.info();
|
||||
pub fn new(pool: CpuPool, allow_js_eval: bool, app: T) -> Self {
|
||||
let mut info = EndpointInfo::from(app.info());
|
||||
info.allow_js_eval = Some(allow_js_eval);
|
||||
Dapp {
|
||||
pool,
|
||||
app,
|
||||
safe_to_embed_on: None,
|
||||
info: EndpointInfo::from(info),
|
||||
info,
|
||||
fallback_to_index_html: false,
|
||||
}
|
||||
}
|
||||
@@ -65,13 +66,14 @@ impl<T: WebApp + 'static> Dapp<T> {
|
||||
/// Creates new `Dapp` which can be safely used in iframe
|
||||
/// even from different origin. It might be dangerous (clickjacking).
|
||||
/// Use wisely!
|
||||
pub fn new_safe_to_embed(pool: CpuPool, app: T, address: Embeddable) -> Self {
|
||||
let info = app.info();
|
||||
pub fn new_safe_to_embed(pool: CpuPool, allow_js_eval: bool, app: T, address: Embeddable) -> Self {
|
||||
let mut info = EndpointInfo::from(app.info());
|
||||
info.allow_js_eval = Some(allow_js_eval);
|
||||
Dapp {
|
||||
pool,
|
||||
app,
|
||||
safe_to_embed_on: address,
|
||||
info: EndpointInfo::from(info),
|
||||
info,
|
||||
fallback_to_index_html: false,
|
||||
}
|
||||
}
|
||||
@@ -117,6 +119,7 @@ impl<T: WebApp> Endpoint for Dapp<T> {
|
||||
file,
|
||||
cache: PageCache::Disabled,
|
||||
safe_to_embed_on: self.safe_to_embed_on.clone(),
|
||||
allow_js_eval: self.info.allow_js_eval.clone().unwrap_or(false),
|
||||
}.into_response();
|
||||
|
||||
self.pool.spawn(reader).forget();
|
||||
@@ -128,12 +131,14 @@ impl<T: WebApp> Endpoint for Dapp<T> {
|
||||
impl From<Info> for EndpointInfo {
|
||||
fn from(info: Info) -> Self {
|
||||
EndpointInfo {
|
||||
id: None,
|
||||
name: info.name.into(),
|
||||
description: info.description.into(),
|
||||
author: info.author.into(),
|
||||
icon_url: info.icon_url.into(),
|
||||
local_url: None,
|
||||
version: info.version.into(),
|
||||
allow_js_eval: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -59,6 +59,8 @@ pub struct PageHandler<T: DappFile> {
|
||||
pub safe_to_embed_on: Embeddable,
|
||||
/// Cache settings for this page.
|
||||
pub cache: PageCache,
|
||||
/// Allow JS unsafe-eval.
|
||||
pub allow_js_eval: bool,
|
||||
}
|
||||
|
||||
impl<T: DappFile> PageHandler<T> {
|
||||
@@ -93,7 +95,7 @@ impl<T: DappFile> PageHandler<T> {
|
||||
|
||||
headers.set(header::ContentType(file.content_type().to_owned()));
|
||||
|
||||
add_security_headers(&mut headers, self.safe_to_embed_on);
|
||||
add_security_headers(&mut headers, self.safe_to_embed_on, self.allow_js_eval);
|
||||
}
|
||||
|
||||
let initial_content = if file.content_type().to_owned() == mime::TEXT_HTML {
|
||||
|
@@ -98,6 +98,7 @@ impl Dapp {
|
||||
file: self.get_file(path),
|
||||
cache: self.cache,
|
||||
safe_to_embed_on: self.embeddable_on.clone(),
|
||||
allow_js_eval: self.info.as_ref().and_then(|x| x.allow_js_eval).unwrap_or(false),
|
||||
}.into_response();
|
||||
|
||||
self.pool.spawn(reader).forget();
|
||||
|
@ -181,7 +181,7 @@ fn should_return_fetched_dapp_content() {
assert_security_headers_for_embed(&response2.headers);
assert_eq!(
response2.body,
r#"D2
r#"EA
{
"id": "9c94e154dab8acf859b30ee80fc828fb1d38359d938751b65db71d460588d82a",
"name": "Gavcoin",
@ -189,7 +189,8 @@ fn should_return_fetched_dapp_content() {
"version": "1.0.0",
"author": "",
"iconUrl": "icon.png",
"localUrl": null
"localUrl": null,
"allowJsEval": false
}
0
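A note on the `D2` to `EA` change in the expected body above: the test compares against a chunked HTTP response, and the leading hexadecimal value is the chunk length. Adding `"allowJsEval": false` to the JSON payload (a comma, a newline, indentation and the 20-character field, roughly 0x18 = 24 bytes) plausibly grows the chunk from 0xD2 to 0xEA bytes, since 0xD2 + 0x18 = 0xEA. This reading is an inference from the diff, not something stated in the commits.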
@ -13,8 +13,8 @@ rustc_version = "0.1"
parity-ui-dev = { path = "../../js", optional = true }
parity-ui-old-dev = { path = "../../js-old", optional = true }
# This is managed by the js/scripts/release.sh script on CI - keep it in a single line
parity-ui-old-precompiled = { git = "https://github.com/js-dist-paritytech/parity-master-1-9-v1.git", optional = true }
parity-ui-precompiled = { git = "https://github.com/js-dist-paritytech/parity-master-1-9-shell.git", optional = true }
parity-ui-old-precompiled = { git = "https://github.com/js-dist-paritytech/parity-stable-1-9-v1.git", optional = true }
parity-ui-precompiled = { git = "https://github.com/js-dist-paritytech/parity-stable-1-9-shell.git", optional = true }

[features]
no-precompiled-js = ["parity-ui-dev", "parity-ui-old-dev"]
@ -8,11 +8,11 @@ authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
ansi_term = "0.9"
bloomchain = "0.1"
bloomchain = { path = "../util/bloomchain" }
bn = { git = "https://github.com/paritytech/bn" }
byteorder = "1.0"
common-types = { path = "types" }
crossbeam = "0.2.9"
crossbeam = "0.3"
ethash = { path = "../ethash" }
ethcore-bloom-journal = { path = "../util/bloom" }
ethcore-bytes = { path = "../util/bytes" }
@ -33,7 +33,7 @@ impl Factory {
/// Create fresh instance of VM
/// Might choose implementation depending on supplied gas.
#[cfg(feature = "jit")]
pub fn create(&self, gas: U256) -> Box<Vm> {
pub fn create(&self, gas: &U256) -> Box<Vm> {
match self.evm {
VMType::Jit => {
Box::new(super::jit::JitEvm::default())
@ -49,7 +49,7 @@ impl Factory {
/// Create fresh instance of VM
/// Might choose implementation depending on supplied gas.
#[cfg(not(feature = "jit"))]
pub fn create(&self, gas: U256) -> Box<Vm> {
pub fn create(&self, gas: &U256) -> Box<Vm> {
match self.evm {
VMType::Interpreter => if Self::can_fit_in_usize(gas) {
Box::new(super::interpreter::Interpreter::<usize>::new(self.evm_cache.clone()))
@ -68,8 +68,8 @@ impl Factory {
}
}

fn can_fit_in_usize(gas: U256) -> bool {
gas == U256::from(gas.low_u64() as usize)
fn can_fit_in_usize(gas: &U256) -> bool {
gas == &U256::from(gas.low_u64() as usize)
}
}
@ -95,7 +95,7 @@ impl Default for Factory {

#[test]
fn test_create_vm() {
let _vm = Factory::default().create(U256::zero());
let _vm = Factory::default().create(&U256::zero());
}

/// Create tests by injecting different VM factories
@ -914,8 +914,13 @@ mod tests {
use rustc_hex::FromHex;
use vmtype::VMType;
use factory::Factory;
use vm::{ActionParams, ActionValue};
use vm::{Vm, ActionParams, ActionValue};
use vm::tests::{FakeExt, test_finalize};
use bigint::prelude::U256;

fn interpreter(gas: &U256) -> Box<Vm> {
Factory::new(VMType::Interpreter, 1).create(gas)
}

#[test]
fn should_not_fail_on_tracing_mem() {
@ -932,7 +937,7 @@ mod tests {
ext.tracing = true;

let gas_left = {
let mut vm = Factory::new(VMType::Interpreter, 1).create(params.gas);
let mut vm = interpreter(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -954,7 +959,7 @@ mod tests {
ext.tracing = true;

let err = {
let mut vm = Factory::new(VMType::Interpreter, 1).create(params.gas);
let mut vm = interpreter(&params.gas);
test_finalize(vm.exec(params, &mut ext)).err().unwrap()
};
@ -40,7 +40,7 @@ fn test_add(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -60,7 +60,7 @@ fn test_sha3(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -80,7 +80,7 @@ fn test_address(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -102,7 +102,7 @@ fn test_origin(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -124,7 +124,7 @@ fn test_sender(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -159,7 +159,7 @@ fn test_extcodecopy(factory: super::Factory) {
ext.codes.insert(sender, Arc::new(sender_code));

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -179,7 +179,7 @@ fn test_log_empty(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -211,7 +211,7 @@ fn test_log_sender(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -236,7 +236,7 @@ fn test_blockhash(factory: super::Factory) {
ext.blockhashes.insert(U256::zero(), blockhash.clone());

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -258,7 +258,7 @@ fn test_calldataload(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -279,7 +279,7 @@ fn test_author(factory: super::Factory) {
ext.info.author = author;

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -299,7 +299,7 @@ fn test_timestamp(factory: super::Factory) {
ext.info.timestamp = timestamp;

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -319,7 +319,7 @@ fn test_number(factory: super::Factory) {
ext.info.number = number;

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -339,7 +339,7 @@ fn test_difficulty(factory: super::Factory) {
ext.info.difficulty = difficulty;

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -359,7 +359,7 @@ fn test_gas_limit(factory: super::Factory) {
ext.info.gas_limit = gas_limit;

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -377,7 +377,7 @@ fn test_mul(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -395,7 +395,7 @@ fn test_sub(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -413,7 +413,7 @@ fn test_div(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -431,7 +431,7 @@ fn test_div_zero(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -449,7 +449,7 @@ fn test_mod(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -468,7 +468,7 @@ fn test_smod(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -487,7 +487,7 @@ fn test_sdiv(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -506,7 +506,7 @@ fn test_exp(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -526,7 +526,7 @@ fn test_comparison(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -547,7 +547,7 @@ fn test_signed_comparison(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -568,7 +568,7 @@ fn test_bitops(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -591,7 +591,7 @@ fn test_addmod_mulmod(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -612,7 +612,7 @@ fn test_byte(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -631,7 +631,7 @@ fn test_signextend(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -651,7 +651,7 @@ fn test_badinstruction_int() {
let mut ext = FakeExt::new();

let err = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap_err()
};

@ -671,7 +671,7 @@ fn test_pop(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};
@ -691,7 +691,7 @@ fn test_extops(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -714,7 +714,7 @@ fn test_jumps(factory: super::Factory) {
let mut ext = FakeExt::new();

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -742,7 +742,7 @@ fn test_calls(factory: super::Factory) {
};

let gas_left = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap()
};

@ -781,7 +781,7 @@ fn test_create_in_staticcall(factory: super::Factory) {
ext.is_static = true;

let err = {
let mut vm = factory.create(params.gas);
let mut vm = factory.create(&params.gas);
test_finalize(vm.exec(params, &mut ext)).unwrap_err()
};
@ -16,7 +16,6 @@ memorydb = { path = "../../util/memorydb" }
patricia-trie = { path = "../../util/patricia_trie" }
ethcore-network = { path = "../../util/network" }
ethcore-io = { path = "../../util/io" }
evm = { path = "../evm" }
heapsize = "0.4"
vm = { path = "../vm" }
rlp = { path = "../../util/rlp" }
@ -60,7 +60,6 @@ extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
extern crate ethcore_bytes as bytes;
extern crate ethcore;
extern crate evm;
extern crate heapsize;
extern crate futures;
extern crate itertools;
@ -18,6 +18,7 @@
//! The request service is implemented using Futures. Higher level request handlers
//! will take the raw data received here and extract meaningful results from it.

use std::cmp;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::Arc;
@ -28,6 +29,7 @@ use futures::{Async, Poll, Future};
use futures::sync::oneshot::{self, Sender, Receiver, Canceled};
use network::PeerId;
use parking_lot::{RwLock, Mutex};
use rand;

use net::{
self, Handler, PeerStatus, Status, Capabilities,
@ -90,7 +92,14 @@ impl Pending {
match self.requests[idx].respond_local(cache) {
Some(response) => {
self.requests.supply_response_unchecked(&response);

// update header and back-references after each from-cache
// response to ensure that the requests are left in a consistent
// state and increase the likelihood of being able to answer
// the next request from cache.
self.update_header_refs(idx, &response);
self.fill_unanswered();

self.responses.push(response);
}
None => break,
@ -382,7 +391,10 @@ impl OnDemand {
true => None,
})
.filter_map(|pending| {
for (peer_id, peer) in peers.iter() { // .shuffle?
// the peer we dispatch to is chosen randomly
let num_peers = peers.len();
let rng = rand::random::<usize>() % cmp::max(num_peers, 1);
for (peer_id, peer) in peers.iter().chain(peers.iter()).skip(rng).take(num_peers) {
// TODO: see which requests can be answered by the cache?

if !peer.can_fulfill(&pending.required_capabilities) {
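The dispatch change above stops always iterating peers from the front of the map and instead starts at a random offset, visiting every peer exactly once while wrapping around the end of the list. A minimal standalone sketch of that selection pattern, assuming the `rand` crate (illustrative only, not the actual OnDemand code):

extern crate rand;

use std::cmp;

// Visit every peer once, starting at a random index and wrapping around,
// mirroring `peers.iter().chain(peers.iter()).skip(rng).take(num_peers)`
// from the hunk above.
fn dispatch_order<T: Clone>(peers: &[T]) -> Vec<T> {
    let num_peers = peers.len();
    let start = rand::random::<usize>() % cmp::max(num_peers, 1);
    peers.iter()
        .chain(peers.iter())
        .skip(start)
        .take(num_peers)
        .cloned()
        .collect()
}

fn main() {
    let peers = vec!["a", "b", "c", "d"];
    // Prints all four peers once, in rotated order, e.g. ["c", "d", "a", "b"].
    println!("{:?}", dispatch_order(&peers));
}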
@ -26,6 +26,7 @@
|
||||
"minGasLimit": "0x1388",
|
||||
"networkID" : "0x1",
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": "0x0",
|
||||
"eip98Transition": "0xffffffffffffffff",
|
||||
"eip140Transition": "0x0",
|
||||
"eip211Transition": "0x0",
|
||||
|
@ -15,7 +15,8 @@
|
||||
"ecip1010ContinueTransition": 5000000,
|
||||
"ecip1017EraRounds": 5000000,
|
||||
"eip161abcTransition": "0x7fffffffffffffff",
|
||||
"eip161dTransition": "0x7fffffffffffffff"
|
||||
"eip161dTransition": "0x7fffffffffffffff",
|
||||
"bombDefuseTransition": 5900000
|
||||
}
|
||||
}
|
||||
},
|
||||
|
@ -26,6 +26,7 @@
|
||||
"minGasLimit": "0x1388",
|
||||
"networkID" : "0x1",
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": "0x0",
|
||||
"eip98Transition": "0xffffffffffffffff",
|
||||
"eip140Transition": "0x0",
|
||||
"eip210Transition": "0x0",
|
||||
|
@ -25,7 +25,8 @@
|
||||
"eip98Transition": "0x7fffffffffffffff",
|
||||
"eip86Transition": "0x7fffffffffffffff",
|
||||
"eip155Transition": "0x7fffffffffffffff",
|
||||
"maxCodeSize": 24576
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": "0x7fffffffffffffff"
|
||||
},
|
||||
"genesis": {
|
||||
"seal": {
|
||||
|
@ -25,7 +25,8 @@
|
||||
"eip98Transition": "0x7fffffffffffffff",
|
||||
"eip86Transition": "0x7fffffffffffffff",
|
||||
"eip155Transition": "0x7fffffffffffffff",
|
||||
"maxCodeSize": 24576
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": "0x0"
|
||||
},
|
||||
"genesis": {
|
||||
"seal": {
|
||||
|
@ -152,6 +152,7 @@
|
||||
"eip98Transition": "0x7fffffffffffff",
|
||||
"eip86Transition": "0x7fffffffffffff",
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": 2675000,
|
||||
"eip140Transition": 4370000,
|
||||
"eip211Transition": 4370000,
|
||||
"eip214Transition": 4370000,
|
||||
|
@ -34,13 +34,16 @@
|
||||
"networkID" : "0x2A",
|
||||
"forkBlock": 4297256,
|
||||
"forkCanonHash": "0x0a66d93c2f727dca618fabaf70c39b37018c73d78b939d8b11efbbd09034778f",
|
||||
"validateReceiptsTransition" : 1000000,
|
||||
"eip155Transition": 1000000,
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": 6600000,
|
||||
"validateChainIdTransition": 1000000,
|
||||
"validateReceiptsTransition" : 1000000,
|
||||
"eip140Transition": 5067000,
|
||||
"eip211Transition": 5067000,
|
||||
"eip214Transition": 5067000,
|
||||
"eip658Transition": 5067000
|
||||
"eip658Transition": 5067000,
|
||||
"wasmActivationTransition": 6600000
|
||||
},
|
||||
"genesis": {
|
||||
"seal": {
|
||||
|
ethcore/res/ethereum/kovan_wasm_test.json (new file, 74 lines)
@ -0,0 +1,74 @@
|
||||
{
|
||||
"name": "Kovan (Test)",
|
||||
"dataDir": "kovan-test",
|
||||
"engine": {
|
||||
"authorityRound": {
|
||||
"params": {
|
||||
"stepDuration": "4",
|
||||
"blockReward": "0x4563918244F40000",
|
||||
"validators" : {
|
||||
"list": [
|
||||
"0x00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED",
|
||||
"0x00427feae2419c15b89d1c21af10d1b6650a4d3d",
|
||||
"0x4Ed9B08e6354C70fE6F8CB0411b0d3246b424d6c",
|
||||
"0x0020ee4Be0e2027d76603cB751eE069519bA81A1",
|
||||
"0x0010f94b296a852aaac52ea6c5ac72e03afd032d",
|
||||
"0x007733a1FE69CF3f2CF989F81C7b4cAc1693387A",
|
||||
"0x00E6d2b931F55a3f1701c7389d592a7778897879",
|
||||
"0x00e4a10650e5a6D6001C38ff8E64F97016a1645c",
|
||||
"0x00a0a24b9f0e5ec7aa4c7389b8302fd0123194de"
|
||||
]
|
||||
},
|
||||
"validateScoreTransition": 1000000,
|
||||
"validateStepTransition": 1500000,
|
||||
"maximumUncleCountTransition": 5067000,
|
||||
"maximumUncleCount": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"params": {
|
||||
"gasLimitBoundDivisor": "0x400",
|
||||
"registrar" : "0xfAb104398BBefbd47752E7702D9fE23047E1Bca3",
|
||||
"maximumExtraDataSize": "0x20",
|
||||
"minGasLimit": "0x1388",
|
||||
"networkID" : "0x2A",
|
||||
"forkBlock": 4297256,
|
||||
"forkCanonHash": "0x0a66d93c2f727dca618fabaf70c39b37018c73d78b939d8b11efbbd09034778f",
|
||||
"validateReceiptsTransition" : 1000000,
|
||||
"eip155Transition": 1000000,
|
||||
"validateChainIdTransition": 1000000,
|
||||
"eip140Transition": 5067000,
|
||||
"eip211Transition": 5067000,
|
||||
"eip214Transition": 5067000,
|
||||
"eip658Transition": 5067000,
|
||||
"wasmActivationTransition": 10
|
||||
},
|
||||
"genesis": {
|
||||
"seal": {
|
||||
"authorityRound": {
|
||||
"step": "0x0",
|
||||
"signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
|
||||
}
|
||||
},
|
||||
"difficulty": "0x20000",
|
||||
"gasLimit": "0x5B8D80"
|
||||
},
|
||||
"accounts": {
|
||||
"0x0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
|
||||
"0x0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
|
||||
"0x0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
|
||||
"0x0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
|
||||
"0x0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": 5067000, "pricing": { "modexp": { "divisor": 20 } } } },
|
||||
"0x0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": 5067000, "pricing": { "linear": { "base": 500, "word": 0 } } } },
|
||||
"0x0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": 5067000, "pricing": { "linear": { "base": 40000, "word": 0 } } } },
|
||||
"0x0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": 5067000, "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } },
|
||||
"0x00521965e7bd230323c423d96c657db5b79d099f": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }
|
||||
},
|
||||
"nodes": [
|
||||
"enode://56abaf065581a5985b8c5f4f88bd202526482761ba10be9bfdcd14846dd01f652ec33fde0f8c0fd1db19b59a4c04465681fcef50e11380ca88d25996191c52de@40.71.221.215:30303",
|
||||
"enode://d07827483dc47b368eaf88454fb04b41b7452cf454e194e2bd4c14f98a3278fed5d819dbecd0d010407fc7688d941ee1e58d4f9c6354d3da3be92f55c17d7ce3@52.166.117.77:30303",
|
||||
"enode://8fa162563a8e5a05eef3e1cd5abc5828c71344f7277bb788a395cce4a0e30baf2b34b92fe0b2dbbba2313ee40236bae2aab3c9811941b9f5a7e8e90aaa27ecba@52.165.239.18:30303",
|
||||
"enode://7e2e7f00784f516939f94e22bdc6cf96153603ca2b5df1c7cc0f90a38e7a2f218ffb1c05b156835e8b49086d11fdd1b3e2965be16baa55204167aa9bf536a4d9@52.243.47.56:30303",
|
||||
"enode://0518a3d35d4a7b3e8c433e7ffd2355d84a1304ceb5ef349787b556197f0c87fad09daed760635b97d52179d645d3e6d16a37d2cc0a9945c2ddf585684beb39ac@40.68.248.100:30303"
|
||||
]
|
||||
}
|
@ -40,7 +40,8 @@
|
||||
"eip211Transition":"0x7fffffffffffff",
|
||||
"eip214Transition":"0x7fffffffffffff",
|
||||
"eip658Transition":"0x7fffffffffffff",
|
||||
"maxCodeSize":"0x6000"
|
||||
"maxCodeSize":"0x6000",
|
||||
"maxCodeSizeTransition": "0x7fffffffffffff"
|
||||
},
|
||||
"genesis":{
|
||||
"seal":{
|
||||
|
ethcore/res/ethereum/mcip6_byz.json (new file, 161 lines)
@ -0,0 +1,161 @@
|
||||
{
|
||||
"name":"Musicoin Byzantium Test",
|
||||
"dataDir":"mcip6test",
|
||||
"engine":{
|
||||
"Ethash":{
|
||||
"params":{
|
||||
"minimumDifficulty":"0x020000",
|
||||
"difficultyBoundDivisor":"0x0800",
|
||||
"durationLimit":"0x0d",
|
||||
"homesteadTransition":"0x17",
|
||||
"eip100bTransition":"0x2a",
|
||||
"eip150Transition":"0x2a",
|
||||
"eip160Transition":"0x7fffffffffffff",
|
||||
"eip161abcTransition":"0x7fffffffffffff",
|
||||
"eip161dTransition":"0x7fffffffffffff",
|
||||
"eip649Transition":"0x2a",
|
||||
"blockReward":"0x1105a0185b50a80000",
|
||||
"mcip3Transition":"0x17",
|
||||
"mcip3MinerReward":"0xd8d726b7177a80000",
|
||||
"mcip3UbiReward":"0x2b5e3af16b1880000",
|
||||
"mcip3UbiContract":"0x00efdd5883ec628983e9063c7d969fe268bbf310",
|
||||
"mcip3DevReward":"0xc249fdd327780000",
|
||||
"mcip3DevContract":"0x00756cf8159095948496617f5fb17ed95059f536"
|
||||
}
|
||||
}
|
||||
},
|
||||
"params":{
|
||||
"gasLimitBoundDivisor":"0x0400",
|
||||
"registrar":"0x5C271c4C9A67E7D73b7b3669d47504741354f21D",
|
||||
"accountStartNonce":"0x00",
|
||||
"maximumExtraDataSize":"0x20",
|
||||
"minGasLimit":"0x1388",
|
||||
"networkID":"0x76740c",
|
||||
"forkBlock":"0x2b",
|
||||
"forkCanonHash":"0x23c3171e864a5d513a3ef85e4cf86dac4cc36b89e5b8e63bf0ebcca68b9e43c9",
|
||||
"eip86Transition":"0x7fffffffffffff",
|
||||
"eip98Transition":"0x7fffffffffffff",
|
||||
"eip140Transition":"0x2a",
|
||||
"eip155Transition":"0x2a",
|
||||
"eip211Transition":"0x2a",
|
||||
"eip214Transition":"0x2a",
|
||||
"eip658Transition":"0x2a",
|
||||
"maxCodeSize":"0x6000",
|
||||
"maxCodeSizeTransition": "0x7fffffffffffff"
|
||||
},
|
||||
"genesis":{
|
||||
"seal":{
|
||||
"ethereum":{
|
||||
"nonce":"0x000000000000002a",
|
||||
"mixHash":"0x00000000000000000000000000000000000000647572616c65787365646c6578"
|
||||
}
|
||||
},
|
||||
"difficulty":"0x3d0900",
|
||||
"author":"0x0000000000000000000000000000000000000000",
|
||||
"timestamp":"0x00",
|
||||
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"extraData":"",
|
||||
"gasLimit":"0x7a1200"
|
||||
},
|
||||
"nodes":[
|
||||
"enode://5ddc110733f6d34101973cdef3f9b43484159acf6f816d3b1ee92bc3c98ea453e857bb1207edf0ec0242008ab3a0f9f05eeaee99d47bd414c08a5bdf4847de13@176.9.3.148:30303",
|
||||
"enode://38f074f4db8e64dfbaf87984bf290eef67772a901a7113d1b62f36216be152b8450c393d6fc562a5e38f04f99bc8f439a99010a230b1d92dc1df43bf0bd00615@176.9.3.148:30403"
|
||||
],
|
||||
"accounts":{
|
||||
"0000000000000000000000000000000000000001":{
|
||||
"balance":"1",
|
||||
"builtin":{
|
||||
"name":"ecrecover",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":3000,
|
||||
"word":0
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000002":{
|
||||
"balance":"1",
|
||||
"builtin":{
|
||||
"name":"sha256",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":60,
|
||||
"word":12
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000003":{
|
||||
"balance":"1",
|
||||
"builtin":{
|
||||
"name":"ripemd160",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":600,
|
||||
"word":120
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000004":{
|
||||
"balance":"1",
|
||||
"builtin":{
|
||||
"name":"identity",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":15,
|
||||
"word":3
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000005":{
|
||||
"builtin":{
|
||||
"name":"modexp",
|
||||
"activate_at":"0x2a",
|
||||
"pricing":{
|
||||
"modexp":{
|
||||
"divisor":20
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000006":{
|
||||
"builtin":{
|
||||
"name":"alt_bn128_add",
|
||||
"activate_at":"0x2a",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":500,
|
||||
"word":0
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000007":{
|
||||
"builtin":{
|
||||
"name":"alt_bn128_mul",
|
||||
"activate_at":"0x2a",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":40000,
|
||||
"word":0
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"0000000000000000000000000000000000000008":{
|
||||
"builtin":{
|
||||
"name":"alt_bn128_pairing",
|
||||
"activate_at":"0x2a",
|
||||
"pricing":{
|
||||
"alt_bn128_pairing":{
|
||||
"base":100000,
|
||||
"pair":80000
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -14,9 +14,9 @@
|
||||
"ecip1010PauseTransition": 1915000,
|
||||
"ecip1010ContinueTransition": 3415000,
|
||||
"ecip1017EraRounds": 2000000,
|
||||
|
||||
"eip161abcTransition": "0x7fffffffffffffff",
|
||||
"eip161dTransition": "0x7fffffffffffffff"
|
||||
"eip161dTransition": "0x7fffffffffffffff",
|
||||
"bombDefuseTransition": 2300000
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -31,7 +31,6 @@
|
||||
"forkBlock": "0x1b34d8",
|
||||
"forkCanonHash": "0xf376243aeff1f256d970714c3de9fd78fa4e63cf63e32a51fe1169e375d98145",
|
||||
"eip155Transition": 1915000,
|
||||
|
||||
"eip98Transition": "0x7fffffffffffff",
|
||||
"eip86Transition": "0x7fffffffffffff"
|
||||
},
|
||||
|
@ -8,12 +8,12 @@
|
||||
"difficultyBoundDivisor":"0x0800",
|
||||
"durationLimit":"0x0d",
|
||||
"homesteadTransition":"0x118c30",
|
||||
"eip100bTransition":"0x7fffffffffffff",
|
||||
"eip150Transition":"0x7fffffffffffff",
|
||||
"eip160Transition":"0x7fffffffffffff",
|
||||
"eip161abcTransition":"0x7fffffffffffff",
|
||||
"eip161dTransition":"0x7fffffffffffff",
|
||||
"eip649Transition":"0x7fffffffffffff",
|
||||
"eip100bTransition":"0x21e88e",
|
||||
"eip150Transition":"0x21e88e",
|
||||
"eip160Transition":"0x21e88e",
|
||||
"eip161abcTransition":"0x21e88e",
|
||||
"eip161dTransition":"0x21e88e",
|
||||
"eip649Transition":"0x21e88e",
|
||||
"blockReward":"0x1105a0185b50a80000",
|
||||
"mcip3Transition":"0x124f81",
|
||||
"mcip3MinerReward":"0xd8d726b7177a80000",
|
||||
@ -31,15 +31,15 @@
|
||||
"maximumExtraDataSize":"0x20",
|
||||
"minGasLimit":"0x1388",
|
||||
"networkID":"0x76740f",
|
||||
"forkBlock":"0x5b6",
|
||||
"forkCanonHash":"0xa5e88ad9e34d113e264e307bc27e8471452c8fc13780324bb3abb96fd0558343",
|
||||
"forkBlock":"0x1d8015",
|
||||
"forkCanonHash":"0x380602acf82b629a0be6b5adb2b4a801e960a07dc8261bf196d21befdbb8f2f9",
|
||||
"eip86Transition":"0x7fffffffffffff",
|
||||
"eip98Transition":"0x7fffffffffffff",
|
||||
"eip140Transition":"0x7fffffffffffff",
|
||||
"eip155Transition":"0x7fffffffffffff",
|
||||
"eip211Transition":"0x7fffffffffffff",
|
||||
"eip214Transition":"0x7fffffffffffff",
|
||||
"eip658Transition":"0x7fffffffffffff",
|
||||
"eip140Transition":"0x21e88e",
|
||||
"eip155Transition":"0x21e88e",
|
||||
"eip211Transition":"0x21e88e",
|
||||
"eip214Transition":"0x21e88e",
|
||||
"eip658Transition":"0x21e88e",
|
||||
"maxCodeSize":"0x6000"
|
||||
},
|
||||
"genesis":{
|
||||
@ -57,12 +57,9 @@
|
||||
"gasLimit":"0x7a1200"
|
||||
},
|
||||
"nodes":[
|
||||
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
|
||||
"enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303",
|
||||
"enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303",
|
||||
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303",
|
||||
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
|
||||
"enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
|
||||
"enode://09fcd36d553044c8b499b9b9e13a228ffd99572c513f77073d41f009717c464cd4399c0e665d6aff1590324254ee4e698b2b2533b1998dd04d896b9d6aff7895@35.185.67.35:30303",
|
||||
"enode://89e51a34770a0badf8ea18c4c4d2c361cde707abd60031d99b1ab3010363e1898230a516ddb37d974af8d8db1b322779d7fe0caae0617bed4924d1b4968cf92b@35.231.48.142:30303",
|
||||
"enode://b58c0c71f08864c0cf7fa9dea2c4cbefae5ae7a36cc30d286603b24982d25f3ccc056b589119324c51768fc2054b8c529ecf682e06e1e9980170b93ff194ed7a@132.148.132.9:30303",
|
||||
"enode://d302f52c8789ad87ee528f1431a67f1aa646c9bec17babb4665dfb3d61de5b9119a70aa77b2147a5f28854092ba09769323c1c552a6ac6f6a34cbcf767e2d2fe@158.69.248.48:30303",
|
||||
"enode://c72564bce8331ae298fb8ece113a456e3927d7e5989c2be3e445678b3600579f722410ef9bbfe339335d676af77343cb21b5b1703b7bebc32be85fce937a2220@191.252.185.71:30303",
|
||||
"enode://e3ae4d25ee64791ff98bf17c37acf90933359f2505c00f65c84f6863231a32a94153cadb0a462e428f18f35ded6bd91cd91033d26576a28558c22678be9cfaee@5.63.158.137:35555"
|
||||
@ -119,7 +116,7 @@
|
||||
"0000000000000000000000000000000000000005":{
|
||||
"builtin":{
|
||||
"name":"modexp",
|
||||
"activate_at":"0x7fffffffffffff",
|
||||
"activate_at":"0x21e88e",
|
||||
"pricing":{
|
||||
"modexp":{
|
||||
"divisor":20
|
||||
@ -130,7 +127,7 @@
|
||||
"0000000000000000000000000000000000000006":{
|
||||
"builtin":{
|
||||
"name":"alt_bn128_add",
|
||||
"activate_at":"0x7fffffffffffff",
|
||||
"activate_at":"0x21e88e",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":500,
|
||||
@ -142,7 +139,7 @@
|
||||
"0000000000000000000000000000000000000007":{
|
||||
"builtin":{
|
||||
"name":"alt_bn128_mul",
|
||||
"activate_at":"0x7fffffffffffff",
|
||||
"activate_at":"0x21e88e",
|
||||
"pricing":{
|
||||
"linear":{
|
||||
"base":40000,
|
||||
@ -154,7 +151,7 @@
|
||||
"0000000000000000000000000000000000000008":{
|
||||
"builtin":{
|
||||
"name":"alt_bn128_pairing",
|
||||
"activate_at":"0x7fffffffffffff",
|
||||
"activate_at":"0x21e88e",
|
||||
"pricing":{
|
||||
"alt_bn128_pairing":{
|
||||
"base":100000,
|
||||
|
@ -29,6 +29,7 @@
|
||||
"forkBlock": 641350,
|
||||
"forkCanonHash": "0x8033403e9fe5811a7b6d6b469905915de1c59207ce2172cbcf5d6ff14fa6a2eb",
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": 10,
|
||||
"eip155Transition": 10,
|
||||
"eip98Transition": "0x7fffffffffffff",
|
||||
"eip86Transition": "0x7fffffffffffff",
|
||||
@ -52,8 +53,10 @@
|
||||
"gasLimit": "0x1000000"
|
||||
},
|
||||
"nodes": [
|
||||
"enode://20c9ad97c081d63397d7b685a412227a40e23c8bdc6688c6f37e97cfbc22d2b4d1db1510d8f61e6a8866ad7f0e17c02b14182d37ea7c3c8b9c2683aeb6b733a1@52.169.14.227:30303",
|
||||
"enode://6ce05930c72abc632c58e2e4324f7c7ea478cec0ed4fa2528982cf34483094e9cbc9216e7aa349691242576d552a2a56aaeae426c5303ded677ce455ba1acd9d@13.84.180.240:30303"
|
||||
"enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303",
|
||||
"enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303",
|
||||
"enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303",
|
||||
"enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303"
|
||||
],
|
||||
"accounts": {
|
||||
"0000000000000000000000000000000000000000": { "balance": "1" },
|
||||
|
@ -26,6 +26,7 @@
|
||||
"minGasLimit": "0x1388",
|
||||
"networkID" : "0x1",
|
||||
"maxCodeSize": 24576,
|
||||
"maxCodeSizeTransition": "0",
|
||||
"eip98Transition": "5",
|
||||
"eip140Transition": "5",
|
||||
"eip211Transition": "5",
|
||||
|
@ -1 +1 @@
Subproject commit d9d6133c1bc5dca4c74c9eb758a39546a0d46b45
Subproject commit fb111c82deff8759f54a5038d07cecc77cb5a663
@ -264,9 +264,9 @@ impl AccountProvider {
Ok(Address::from(account.address).into())
}

/// Import a new presale wallet.
pub fn import_wallet(&self, json: &[u8], password: &str) -> Result<Address, Error> {
let account = self.sstore.import_wallet(SecretVaultRef::Root, json, password)?;
/// Import a new wallet.
pub fn import_wallet(&self, json: &[u8], password: &str, gen_id: bool) -> Result<Address, Error> {
let account = self.sstore.import_wallet(SecretVaultRef::Root, json, password, gen_id)?;
if self.blacklisted_accounts.contains(&account.address) {
self.sstore.remove_account(&account, password)?;
return Err(SSError::InvalidAccount.into());
@ -32,7 +32,7 @@ pub struct BlockInfo {
}

/// Describes location of newly inserted block.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq)]
pub enum BlockLocation {
/// It's part of the canon chain.
CanonChain,
@ -44,7 +44,7 @@ pub enum BlockLocation {
BranchBecomingCanonChain(BranchBecomingCanonChainData),
}

#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq)]
pub struct BranchBecomingCanonChainData {
/// Hash of the newest common ancestor with old canon chain.
pub ancestor: H256,
File diff suppressed because it is too large
@ -18,7 +18,6 @@
|
||||
|
||||
use std::ops;
|
||||
use std::io::Write;
|
||||
use bloomchain;
|
||||
use blooms::{GroupPosition, BloomGroup};
|
||||
use db::Key;
|
||||
use engines::epoch::{Transition as EpochTransition};
|
||||
@ -98,32 +97,17 @@ impl ops::Deref for LogGroupKey {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
|
||||
pub struct LogGroupPosition(GroupPosition);
|
||||
|
||||
impl From<bloomchain::group::GroupPosition> for LogGroupPosition {
|
||||
fn from(position: bloomchain::group::GroupPosition) -> Self {
|
||||
LogGroupPosition(From::from(position))
|
||||
}
|
||||
}
|
||||
|
||||
impl HeapSizeOf for LogGroupPosition {
|
||||
fn heap_size_of_children(&self) -> usize {
|
||||
self.0.heap_size_of_children()
|
||||
}
|
||||
}
|
||||
|
||||
impl Key<BloomGroup> for LogGroupPosition {
|
||||
impl Key<BloomGroup> for GroupPosition {
|
||||
type Target = LogGroupKey;
|
||||
|
||||
fn key(&self) -> Self::Target {
|
||||
let mut result = [0u8; 6];
|
||||
result[0] = ExtrasIndex::BlocksBlooms as u8;
|
||||
result[1] = self.0.level;
|
||||
result[2] = (self.0.index >> 24) as u8;
|
||||
result[3] = (self.0.index >> 16) as u8;
|
||||
result[4] = (self.0.index >> 8) as u8;
|
||||
result[5] = self.0.index as u8;
|
||||
result[1] = self.level;
|
||||
result[2] = (self.index >> 24) as u8;
|
||||
result[3] = (self.index >> 16) as u8;
|
||||
result[4] = (self.index >> 8) as u8;
|
||||
result[5] = self.index as u8;
|
||||
LogGroupKey(result)
|
||||
}
|
||||
}
|
||||
|
ethcore/src/blockchain/generator.rs (new file, 219 lines)
@ -0,0 +1,219 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Blockchain generator for tests.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use bigint::prelude::{U256, H256};
|
||||
use bloomchain::Bloom;
|
||||
|
||||
use bytes::Bytes;
|
||||
use header::Header;
|
||||
use rlp::encode;
|
||||
use transaction::SignedTransaction;
|
||||
use views::BlockView;
|
||||
|
||||
/// Helper structure, used for encoding blocks.
|
||||
#[derive(Default, Clone, RlpEncodable)]
|
||||
pub struct Block {
|
||||
pub header: Header,
|
||||
pub transactions: Vec<SignedTransaction>,
|
||||
pub uncles: Vec<Header>
|
||||
}
|
||||
|
||||
impl Block {
|
||||
#[inline]
|
||||
pub fn header(&self) -> Header {
|
||||
self.header.clone()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn hash(&self) -> H256 {
|
||||
BlockView::new(&self.encoded()).header_view().hash()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn number(&self) -> u64 {
|
||||
self.header.number()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn encoded(&self) -> Bytes {
|
||||
encode(self).into_vec()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BlockOptions {
|
||||
pub difficulty: U256,
|
||||
pub bloom: Bloom,
|
||||
pub transactions: Vec<SignedTransaction>,
|
||||
}
|
||||
|
||||
impl Default for BlockOptions {
|
||||
fn default() -> Self {
|
||||
BlockOptions {
|
||||
difficulty: 10.into(),
|
||||
bloom: Bloom::default(),
|
||||
transactions: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockBuilder {
|
||||
blocks: VecDeque<Block>,
|
||||
}
|
||||
|
||||
impl BlockBuilder {
|
||||
pub fn genesis() -> Self {
|
||||
let mut blocks = VecDeque::with_capacity(1);
|
||||
blocks.push_back(Block::default());
|
||||
|
||||
BlockBuilder {
|
||||
blocks,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_block(&self) -> Self {
|
||||
self.add_block_with(|| BlockOptions::default())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_blocks(&self, count: usize) -> Self {
|
||||
self.add_blocks_with(count, || BlockOptions::default())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_block_with<T>(&self, get_metadata: T) -> Self where T: Fn() -> BlockOptions {
|
||||
self.add_blocks_with(1, get_metadata)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_block_with_difficulty<T>(&self, difficulty: T) -> Self where T: Into<U256> {
|
||||
let difficulty = difficulty.into();
|
||||
self.add_blocks_with(1, move || BlockOptions {
|
||||
difficulty,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_block_with_transactions<T>(&self, transactions: T) -> Self
|
||||
where T: IntoIterator<Item = SignedTransaction> {
|
||||
let transactions = transactions.into_iter().collect::<Vec<_>>();
|
||||
self.add_blocks_with(1, || BlockOptions {
|
||||
transactions: transactions.clone(),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn add_block_with_bloom(&self, bloom: Bloom) -> Self {
|
||||
self.add_blocks_with(1, move || BlockOptions {
|
||||
bloom,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn add_blocks_with<T>(&self, count: usize, get_metadata: T) -> Self where T: Fn() -> BlockOptions {
|
||||
assert!(count > 0, "There must be at least 1 block");
|
||||
let mut parent_hash = self.last().hash();
|
||||
let mut parent_number = self.last().number();
|
||||
let mut blocks = VecDeque::with_capacity(count);
|
||||
for _ in 0..count {
|
||||
let mut block = Block::default();
|
||||
let metadata = get_metadata();
|
||||
let block_number = parent_number + 1;
|
||||
block.header.set_parent_hash(parent_hash);
|
||||
block.header.set_number(block_number);
|
||||
block.header.set_log_bloom(metadata.bloom);
|
||||
block.header.set_difficulty(metadata.difficulty);
|
||||
block.transactions = metadata.transactions;
|
||||
|
||||
parent_hash = block.hash();
|
||||
parent_number = block_number;
|
||||
|
||||
blocks.push_back(block);
|
||||
}
|
||||
|
||||
BlockBuilder {
|
||||
blocks,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn last(&self) -> &Block {
|
||||
self.blocks.back().expect("There is always at least 1 block")
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BlockGenerator {
|
||||
builders: VecDeque<BlockBuilder>,
|
||||
}
|
||||
|
||||
impl BlockGenerator {
|
||||
pub fn new<T>(builders: T) -> Self where T: IntoIterator<Item = BlockBuilder> {
|
||||
BlockGenerator {
|
||||
builders: builders.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for BlockGenerator {
|
||||
type Item = Block;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
loop {
|
||||
match self.builders.front_mut() {
|
||||
Some(ref mut builder) => {
|
||||
if let Some(block) = builder.blocks.pop_front() {
|
||||
return Some(block);
|
||||
}
|
||||
},
|
||||
None => return None,
|
||||
}
|
||||
self.builders.pop_front();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{BlockBuilder, BlockOptions, BlockGenerator};
|
||||
|
||||
#[test]
|
||||
fn test_block_builder() {
|
||||
let genesis = BlockBuilder::genesis();
|
||||
let block_1 = genesis.add_block();
|
||||
let block_1001 = block_1.add_blocks(1000);
|
||||
let block_1002 = block_1001.add_block_with(|| BlockOptions::default());
|
||||
let generator = BlockGenerator::new(vec![genesis, block_1, block_1001, block_1002]);
|
||||
assert_eq!(generator.count(), 1003);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_builder_fork() {
|
||||
let genesis = BlockBuilder::genesis();
|
||||
let block_10a = genesis.add_blocks(10);
|
||||
let block_11b = genesis.add_blocks(11);
|
||||
assert_eq!(block_10a.last().number(), 10);
|
||||
assert_eq!(block_11b.last().number(), 11);
|
||||
}
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use rlp::*;
|
||||
use bigint::hash::{H256, H2048};
|
||||
use bytes::Bytes;
|
||||
use header::Header;
|
||||
use transaction::SignedTransaction;
|
||||
|
||||
use super::fork::Forkable;
|
||||
use super::bloom::WithBloom;
|
||||
use super::complete::CompleteBlock;
|
||||
use super::transaction::WithTransaction;
|
||||
|
||||
/// Helper structure, used for encoding blocks.
|
||||
#[derive(Default)]
|
||||
pub struct Block {
|
||||
pub header: Header,
|
||||
pub transactions: Vec<SignedTransaction>,
|
||||
pub uncles: Vec<Header>
|
||||
}
|
||||
|
||||
impl Encodable for Block {
|
||||
fn rlp_append(&self, s: &mut RlpStream) {
|
||||
s.begin_list(3);
|
||||
s.append(&self.header);
|
||||
s.append_list(&self.transactions);
|
||||
s.append_list(&self.uncles);
|
||||
}
|
||||
}
|
||||
|
||||
impl Forkable for Block {
|
||||
fn fork(mut self, fork_number: usize) -> Self where Self: Sized {
|
||||
let difficulty = self.header.difficulty().clone() - fork_number.into();
|
||||
self.header.set_difficulty(difficulty);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl WithBloom for Block {
|
||||
fn with_bloom(mut self, bloom: H2048) -> Self where Self: Sized {
|
||||
self.header.set_log_bloom(bloom);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl WithTransaction for Block {
|
||||
fn with_transaction(mut self, transaction: SignedTransaction) -> Self where Self: Sized {
|
||||
self.transactions.push(transaction);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl CompleteBlock for Block {
|
||||
fn complete(mut self, parent_hash: H256) -> Bytes {
|
||||
self.header.set_parent_hash(parent_hash);
|
||||
encode(&self).into_vec()
|
||||
}
|
||||
}
|
@ -1,35 +0,0 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use bigint::hash::H2048;
|
||||
|
||||
pub trait WithBloom {
|
||||
fn with_bloom(self, bloom: H2048) -> Self where Self: Sized;
|
||||
}
|
||||
|
||||
pub struct Bloom<'a, I> where I: 'a {
|
||||
pub iter: &'a mut I,
|
||||
pub bloom: H2048,
|
||||
}
|
||||
|
||||
impl<'a, I> Iterator for Bloom<'a, I> where I: Iterator, <I as Iterator>::Item: WithBloom {
|
||||
type Item = <I as Iterator>::Item;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next().map(|item| item.with_bloom(self.bloom.clone()))
|
||||
}
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use bigint::hash::H256;
|
||||
use bytes::Bytes;
|
||||
use views::BlockView;
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct BlockFinalizer {
|
||||
parent_hash: H256
|
||||
}
|
||||
|
||||
impl BlockFinalizer {
|
||||
pub fn fork(&self) -> Self {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait CompleteBlock {
|
||||
fn complete(self, parent_hash: H256) -> Bytes;
|
||||
}
|
||||
|
||||
pub struct Complete<'a, I> where I: 'a {
|
||||
pub iter: &'a mut I,
|
||||
pub finalizer: &'a mut BlockFinalizer,
|
||||
}
|
||||
|
||||
impl<'a, I> Iterator for Complete<'a, I> where I: Iterator, <I as Iterator>::Item: CompleteBlock {
|
||||
type Item = Bytes;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next().map(|item| {
|
||||
let rlp = item.complete(self.finalizer.parent_hash.clone());
|
||||
self.finalizer.parent_hash = BlockView::new(&rlp).header_view().hash();
|
||||
rlp
|
||||
})
|
||||
}
|
||||
}
|
@ -1,42 +0,0 @@
|
||||
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Parity.
|
||||
|
||||
// Parity is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Parity is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
pub trait Forkable {
|
||||
fn fork(self, fork_number: usize) -> Self where Self: Sized;
|
||||
}
|
||||
|
||||
pub struct Fork<I> {
|
||||
pub iter: I,
|
||||
pub fork_number: usize,
|
||||
}
|
||||
|
||||
impl<I> Clone for Fork<I> where I: Iterator + Clone {
|
||||
fn clone(&self) -> Self {
|
||||
Fork {
|
||||
iter: self.iter.clone(),
|
||||
fork_number: self.fork_number
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> Iterator for Fork<I> where I: Iterator, <I as Iterator>::Item: Forkable {
|
||||
type Item = <I as Iterator>::Item;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next().map(|item| item.fork(self.fork_number))
|
||||
}
|
||||
}
|
@@ -1,179 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use bigint::prelude::U256;
use bigint::hash::H2048;
use bytes::Bytes;
use header::BlockNumber;
use transaction::SignedTransaction;
use super::fork::Fork;
use super::bloom::Bloom;
use super::complete::{BlockFinalizer, CompleteBlock, Complete};
use super::block::Block;
use super::transaction::Transaction;

/// Chain iterator interface.
pub trait ChainIterator: Iterator + Sized {
	/// Should be called to create a fork of current iterator.
	/// Blocks generated by fork will have lower difficulty than current chain.
	fn fork(&self, fork_number: usize) -> Fork<Self> where Self: Clone;
	/// Should be called to make every consecutive block have given bloom.
	fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self>;
	/// Should be called to make every consecutive block have given transaction.
	fn with_transaction(&mut self, transaction: SignedTransaction) -> Transaction<Self>;
	/// Should be called to complete block. Without complete, block may have incorrect hash.
	fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>;
	/// Completes and generates block.
	fn generate<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Option<Bytes> where Self::Item: CompleteBlock;
}

impl<I> ChainIterator for I where I: Iterator + Sized {
	fn fork(&self, fork_number: usize) -> Fork<Self> where I: Clone {
		Fork {
			iter: self.clone(),
			fork_number: fork_number
		}
	}

	fn with_bloom(&mut self, bloom: H2048) -> Bloom<Self> {
		Bloom {
			iter: self,
			bloom: bloom
		}
	}

	fn with_transaction(&mut self, transaction: SignedTransaction) -> Transaction<Self> {
		Transaction {
			iter: self,
			transaction: transaction,
		}
	}

	fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self> {
		Complete {
			iter: self,
			finalizer: finalizer
		}
	}

	fn generate<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Option<Bytes> where <I as Iterator>::Item: CompleteBlock {
		self.complete(finalizer).next()
	}
}

/// Blockchain generator.
#[derive(Clone)]
pub struct ChainGenerator {
	/// Next block number.
	number: BlockNumber,
	/// Next block difficulty.
	difficulty: U256,
}

impl ChainGenerator {
	fn prepare_block(&self) -> Block {
		let mut block = Block::default();
		block.header.set_number(self.number);
		block.header.set_difficulty(self.difficulty);
		block
	}
}

impl Default for ChainGenerator {
	fn default() -> Self {
		ChainGenerator {
			number: 0,
			difficulty: 1000.into(),
		}
	}
}

impl Iterator for ChainGenerator {
	type Item = Block;

	fn next(&mut self) -> Option<Self::Item> {
		let block = self.prepare_block();
		self.number += 1;
		Some(block)
	}
}

mod tests {
	use bigint::hash::{H256, H2048};
	use views::BlockView;
	use blockchain::generator::{ChainIterator, ChainGenerator, BlockFinalizer};

	#[test]
	fn canon_chain_generator() {
		let mut canon_chain = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();

		let genesis_rlp = canon_chain.generate(&mut finalizer).unwrap();
		let genesis = BlockView::new(&genesis_rlp);

		assert_eq!(genesis.header_view().parent_hash(), H256::default());
		assert_eq!(genesis.header_view().number(), 0);

		let b1_rlp = canon_chain.generate(&mut finalizer).unwrap();
		let b1 = BlockView::new(&b1_rlp);

		assert_eq!(b1.header_view().parent_hash(), genesis.header_view().hash());
		assert_eq!(b1.header_view().number(), 1);

		let mut fork_chain = canon_chain.fork(1);

		let b2_rlp_fork = fork_chain.generate(&mut finalizer.fork()).unwrap();
		let b2_fork = BlockView::new(&b2_rlp_fork);

		assert_eq!(b2_fork.header_view().parent_hash(), b1.header_view().hash());
		assert_eq!(b2_fork.header_view().number(), 2);

		let b2_rlp = canon_chain.generate(&mut finalizer).unwrap();
		let b2 = BlockView::new(&b2_rlp);

		assert_eq!(b2.header_view().parent_hash(), b1.header_view().hash());
		assert_eq!(b2.header_view().number(), 2);
		assert!(b2.header_view().difficulty() > b2_fork.header_view().difficulty());
	}

	#[test]
	fn with_bloom_generator() {
		let bloom = H2048([0x1; 256]);
		let mut gen = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();

		let block0_rlp = gen.with_bloom(bloom).generate(&mut finalizer).unwrap();
		let block1_rlp = gen.generate(&mut finalizer).unwrap();
		let block0 = BlockView::new(&block0_rlp);
		let block1 = BlockView::new(&block1_rlp);

		assert_eq!(block0.header_view().number(), 0);
		assert_eq!(block0.header_view().parent_hash(), H256::default());

		assert_eq!(block1.header_view().number(), 1);
		assert_eq!(block1.header_view().parent_hash(), block0.header_view().hash());

	}

	#[test]
	fn generate_1000_blocks() {
		let generator = ChainGenerator::default();
		let mut finalizer = BlockFinalizer::default();
		let blocks: Vec<_> = generator.take(1000).complete(&mut finalizer).collect();
		assert_eq!(blocks.len(), 1000);
	}
}
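The `ChainGenerator` DSL deleted above is superseded in this changeset by the `BlockBuilder`/`BlockGenerator` helpers that the snapshot tests further down switch to. A minimal usage sketch, with the method signatures assumed from those call sites rather than taken from the new module itself:

```rust
// Hypothetical sketch based on the call sites visible later in this diff
// (e.g. the rewritten chunk_and_restore test); not the full new API.
use blockchain::generator::{BlockBuilder, BlockGenerator};

fn build_small_chain() {
	// A builder rooted at a genesis block...
	let genesis = BlockBuilder::genesis();
	// ...extended by a run of descendant blocks.
	let rest = genesis.add_blocks(100);

	// The generator then yields the blocks in order.
	let generator = BlockGenerator::new(vec![rest]);
	for block in generator {
		// Each generated block exposes an RLP view, as used by
		// BlockChain::insert_block(&mut batch, &block.encoded(), vec![]).
		let _rlp = block.encoded();
	}
}
```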
@@ -1,27 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Blockchain generator for tests.

mod bloom;
mod block;
mod complete;
mod fork;
pub mod generator;
mod transaction;

pub use self::complete::BlockFinalizer;
pub use self::generator::{ChainIterator, ChainGenerator};
@@ -1,35 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use transaction::SignedTransaction;

pub trait WithTransaction {
	fn with_transaction(self, transaction: SignedTransaction) -> Self where Self: Sized;
}

pub struct Transaction<'a, I> where I: 'a {
	pub iter: &'a mut I,
	pub transaction: SignedTransaction,
}

impl <'a, I> Iterator for Transaction<'a, I> where I: Iterator, <I as Iterator>::Item: WithTransaction {
	type Item = <I as Iterator>::Item;

	#[inline]
	fn next(&mut self) -> Option<Self::Item> {
		self.iter.next().map(|item| item.with_transaction(self.transaction.clone()))
	}
}
@@ -2,8 +2,8 @@ use std::collections::HashMap;
use bigint::hash::H256;
use header::BlockNumber;
use blockchain::block_info::BlockInfo;
use blooms::BloomGroup;
use super::extras::{BlockDetails, BlockReceipts, TransactionAddress, LogGroupPosition};
use blockchain::extras::{BlockDetails, BlockReceipts, TransactionAddress};
use blooms::{BloomGroup, GroupPosition};

/// Block extras update info.
pub struct ExtrasUpdate<'a> {
@@ -20,7 +20,7 @@ pub struct ExtrasUpdate<'a> {
	/// Modified block receipts.
	pub block_receipts: HashMap<H256, BlockReceipts>,
	/// Modified blocks blooms.
	pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>,
	pub blocks_blooms: HashMap<GroupPosition, BloomGroup>,
	/// Modified transaction addresses (None signifies removed transactions).
	pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
}
@@ -28,13 +28,6 @@ impl From<LogBloom> for Bloom {
	}
}

impl From<bc::Bloom> for Bloom {
	fn from(bloom: bc::Bloom) -> Self {
		let bytes: [u8; 256] = bloom.into();
		Bloom(LogBloom::from(bytes))
	}
}

impl Into<bc::Bloom> for Bloom {
	fn into(self) -> bc::Bloom {
		let log = self.0;
@ -15,58 +15,39 @@
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use bloomchain::group as bc;
|
||||
use rlp::*;
|
||||
use bloomchain::{Bloom, BloomCompat};
|
||||
use heapsize::HeapSizeOf;
|
||||
use super::Bloom;
|
||||
|
||||
/// Represents group of X consecutive blooms.
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
|
||||
pub struct BloomGroup {
|
||||
blooms: Vec<Bloom>,
|
||||
}
|
||||
|
||||
impl BloomGroup {
|
||||
pub fn accrue_bloom_group(&mut self, group: &BloomGroup) {
|
||||
for (bloom, other) in self.blooms.iter_mut().zip(group.blooms.iter()) {
|
||||
bloom.accrue_bloom(other);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<bc::BloomGroup> for BloomGroup {
|
||||
fn from(group: bc::BloomGroup) -> Self {
|
||||
let blooms = group.blooms
|
||||
.into_iter()
|
||||
.map(From::from)
|
||||
.collect();
|
||||
|
||||
BloomGroup {
|
||||
blooms: blooms
|
||||
blooms: group.blooms
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<bc::BloomGroup> for BloomGroup {
|
||||
fn into(self) -> bc::BloomGroup {
|
||||
let blooms = self.blooms
|
||||
.into_iter()
|
||||
.map(Into::into)
|
||||
.collect();
|
||||
|
||||
bc::BloomGroup {
|
||||
blooms: blooms
|
||||
blooms: self.blooms
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Decodable for BloomGroup {
|
||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
||||
let blooms = rlp.as_list()?;
|
||||
let group = BloomGroup {
|
||||
blooms: blooms
|
||||
};
|
||||
Ok(group)
|
||||
}
|
||||
}
|
||||
|
||||
impl Encodable for BloomGroup {
|
||||
fn rlp_append(&self, s: &mut RlpStream) {
|
||||
s.append_list(&self.blooms);
|
||||
}
|
||||
}
|
||||
|
||||
impl HeapSizeOf for BloomGroup {
|
||||
fn heap_size_of_children(&self) -> usize {
|
||||
self.blooms.heap_size_of_children()
|
||||
|
@ -50,9 +50,9 @@ use encoded;
|
||||
use engines::{EthEngine, EpochTransition};
|
||||
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
|
||||
use vm::{EnvInfo, LastHashes};
|
||||
use evm::{Factory as EvmFactory, Schedule};
|
||||
use evm::Schedule;
|
||||
use executive::{Executive, Executed, TransactOptions, contract_address};
|
||||
use factory::Factories;
|
||||
use factory::{Factories, VmFactory};
|
||||
use futures::{future, Future};
|
||||
use header::{BlockNumber, Header};
|
||||
use io::*;
|
||||
@ -189,7 +189,7 @@ impl Client {
|
||||
|
||||
let trie_factory = TrieFactory::new(trie_spec);
|
||||
let factories = Factories {
|
||||
vm: EvmFactory::new(config.vm_type.clone(), config.jump_table_size),
|
||||
vm: VmFactory::new(config.vm_type.clone(), config.jump_table_size),
|
||||
trie: trie_factory,
|
||||
accountdb: Default::default(),
|
||||
};
|
||||
@ -1910,7 +1910,7 @@ impl MiningBlockChainClient for Client {
|
||||
block
|
||||
}
|
||||
|
||||
fn vm_factory(&self) -> &EvmFactory {
|
||||
fn vm_factory(&self) -> &VmFactory {
|
||||
&self.factories.vm
|
||||
}
|
||||
|
||||
|
@ -20,12 +20,11 @@ use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use bigint::prelude::U256;
|
||||
use bigint::hash::H256;
|
||||
use journaldb;
|
||||
use {trie, kvdb_memorydb, bytes};
|
||||
use {factory, journaldb, trie, kvdb_memorydb, bytes};
|
||||
use kvdb::{self, KeyValueDB};
|
||||
use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
|
||||
use factory::Factories;
|
||||
use evm::{self, VMType, FinalizationResult};
|
||||
use evm::{VMType, FinalizationResult};
|
||||
use vm::{self, ActionParams};
|
||||
|
||||
/// EVM test Error.
|
||||
@ -120,7 +119,7 @@ impl<'a> EvmTestClient<'a> {
|
||||
|
||||
fn factories() -> Factories {
|
||||
Factories {
|
||||
vm: evm::Factory::new(VMType::Interpreter, 5 * 1024),
|
||||
vm: factory::VmFactory::new(VMType::Interpreter, 5 * 1024),
|
||||
trie: trie::TrieFactory::new(trie::TrieSpec::Secure),
|
||||
accountdb: Default::default(),
|
||||
}
|
||||
|
@ -47,7 +47,8 @@ use log_entry::LocalizedLogEntry;
|
||||
use receipt::{Receipt, LocalizedReceipt, TransactionOutcome};
|
||||
use blockchain::extras::BlockReceipts;
|
||||
use error::{ImportResult, Error as EthcoreError};
|
||||
use evm::{Factory as EvmFactory, VMType};
|
||||
use evm::VMType;
|
||||
use factory::VmFactory;
|
||||
use vm::Schedule;
|
||||
use miner::{Miner, MinerService, TransactionImportResult};
|
||||
use spec::Spec;
|
||||
@ -98,7 +99,7 @@ pub struct TestBlockChainClient {
|
||||
/// Spec
|
||||
pub spec: Spec,
|
||||
/// VM Factory
|
||||
pub vm_factory: EvmFactory,
|
||||
pub vm_factory: VmFactory,
|
||||
/// Timestamp assigned to latest sealed block
|
||||
pub latest_block_timestamp: RwLock<u64>,
|
||||
/// Ancient block info.
|
||||
@ -169,7 +170,7 @@ impl TestBlockChainClient {
|
||||
queue_size: AtomicUsize::new(0),
|
||||
miner: Arc::new(Miner::with_spec(&spec)),
|
||||
spec: spec,
|
||||
vm_factory: EvmFactory::new(VMType::Interpreter, 1024 * 1024),
|
||||
vm_factory: VmFactory::new(VMType::Interpreter, 1024 * 1024),
|
||||
latest_block_timestamp: RwLock::new(10_000_000),
|
||||
ancient_block: RwLock::new(None),
|
||||
first_block: RwLock::new(None),
|
||||
@ -399,7 +400,7 @@ impl MiningBlockChainClient for TestBlockChainClient {
|
||||
block.reopen(&*self.spec.engine)
|
||||
}
|
||||
|
||||
fn vm_factory(&self) -> &EvmFactory {
|
||||
fn vm_factory(&self) -> &VmFactory {
|
||||
&self.vm_factory
|
||||
}
|
||||
|
||||
|
@ -21,9 +21,9 @@ use block::{OpenBlock, SealedBlock, ClosedBlock};
|
||||
use blockchain::TreeRoute;
|
||||
use encoded;
|
||||
use vm::LastHashes;
|
||||
use error::{ImportResult, CallError, Error as EthcoreError};
|
||||
use error::{TransactionImportResult, BlockImportError};
|
||||
use evm::{Factory as EvmFactory, Schedule};
|
||||
use error::{ImportResult, CallError, Error as EthcoreError, TransactionImportResult, BlockImportError};
|
||||
use evm::Schedule;
|
||||
use factory::VmFactory;
|
||||
use executive::Executed;
|
||||
use filter::Filter;
|
||||
use header::{BlockNumber};
|
||||
@ -298,7 +298,7 @@ pub trait MiningBlockChainClient: BlockChainClient {
|
||||
fn reopen_block(&self, block: ClosedBlock) -> OpenBlock;
|
||||
|
||||
/// Returns EvmFactory.
|
||||
fn vm_factory(&self) -> &EvmFactory;
|
||||
fn vm_factory(&self) -> &VmFactory;
|
||||
|
||||
/// Broadcast a block proposal.
|
||||
fn broadcast_proposal_block(&self, block: SealedBlock);
|
||||
|
@ -44,8 +44,7 @@ use bigint::hash::{H256, H520};
|
||||
use semantic_version::SemanticVersion;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use unexpected::{Mismatch, OutOfBounds};
|
||||
use util::*;
|
||||
use bytes::Bytes;
|
||||
use util::Address;
|
||||
|
||||
mod finality;
|
||||
|
||||
@ -291,9 +290,11 @@ struct EpochVerifier {
|
||||
|
||||
impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
|
||||
fn verify_light(&self, header: &Header) -> Result<(), Error> {
|
||||
// Validate the timestamp
|
||||
verify_timestamp(&*self.step, header_step(header)?)?;
|
||||
// always check the seal since it's fast.
|
||||
// nothing heavier to do.
|
||||
verify_external(header, &self.subchain_validators, &*self.step, |_| {})
|
||||
verify_external(header, &self.subchain_validators)
|
||||
}
|
||||
|
||||
fn check_finality_proof(&self, proof: &[u8]) -> Option<Vec<H256>> {
|
||||
@ -317,7 +318,7 @@ impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
|
||||
//
|
||||
// `verify_external` checks that signature is correct and author == signer.
|
||||
if header.seal().len() != 2 { return None }
|
||||
otry!(verify_external(header, &self.subchain_validators, &*self.step, |_| {}).ok());
|
||||
otry!(verify_external(header, &self.subchain_validators).ok());
|
||||
|
||||
let newly_finalized = otry!(finality_checker.push_hash(header.hash(), header.author().clone()).ok());
|
||||
finalized.extend(newly_finalized);
|
||||
@ -327,16 +328,6 @@ impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
|
||||
}
|
||||
}
|
||||
|
||||
// Report misbehavior
|
||||
#[derive(Debug)]
|
||||
#[allow(dead_code)]
|
||||
enum Report {
|
||||
// Malicious behavior
|
||||
Malicious(Address, BlockNumber, Bytes),
|
||||
// benign misbehavior
|
||||
Benign(Address, BlockNumber),
|
||||
}
|
||||
|
||||
fn header_step(header: &Header) -> Result<usize, ::rlp::DecoderError> {
|
||||
UntrustedRlp::new(&header.seal().get(0).expect("was either checked with verify_block_basic or is genesis; has 2 fields; qed (Make sure the spec file has a correct genesis seal)")).as_val()
|
||||
}
|
||||
@ -355,34 +346,35 @@ fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: usize, address:
|
||||
step_proposer(validators, bh, step) == *address
|
||||
}
|
||||
|
||||
fn verify_external<F: Fn(Report)>(header: &Header, validators: &ValidatorSet, step: &Step, report: F)
|
||||
-> Result<(), Error>
|
||||
{
|
||||
let header_step = header_step(header)?;
|
||||
|
||||
fn verify_timestamp(step: &Step, header_step: usize) -> Result<(), BlockError> {
|
||||
match step.check_future(header_step) {
|
||||
Err(None) => {
|
||||
trace!(target: "engine", "verify_block_external: block from the future");
|
||||
report(Report::Benign(*header.author(), header.number()));
|
||||
return Err(BlockError::InvalidSeal.into())
|
||||
trace!(target: "engine", "verify_timestamp: block from the future");
|
||||
Err(BlockError::InvalidSeal.into())
|
||||
},
|
||||
Err(Some(oob)) => {
|
||||
trace!(target: "engine", "verify_block_external: block too early");
|
||||
return Err(BlockError::TemporarilyInvalid(oob).into())
|
||||
// NOTE This error might be returned only in early stage of verification (Stage 1).
|
||||
// Returning it further won't recover the sync process.
|
||||
trace!(target: "engine", "verify_timestamp: block too early");
|
||||
Err(BlockError::TemporarilyInvalid(oob).into())
|
||||
},
|
||||
Ok(_) => {
|
||||
let proposer_signature = header_signature(header)?;
|
||||
let correct_proposer = validators.get(header.parent_hash(), header_step);
|
||||
let is_invalid_proposer = *header.author() != correct_proposer ||
|
||||
!verify_address(&correct_proposer, &proposer_signature, &header.bare_hash())?;
|
||||
Ok(_) => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
if is_invalid_proposer {
|
||||
trace!(target: "engine", "verify_block_external: bad proposer for step: {}", header_step);
|
||||
Err(EngineError::NotProposer(Mismatch { expected: correct_proposer, found: header.author().clone() }))?
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Error> {
|
||||
let header_step = header_step(header)?;
|
||||
|
||||
let proposer_signature = header_signature(header)?;
|
||||
let correct_proposer = validators.get(header.parent_hash(), header_step);
|
||||
let is_invalid_proposer = *header.author() != correct_proposer ||
|
||||
!verify_address(&correct_proposer, &proposer_signature, &header.bare_hash())?;
|
||||
|
||||
if is_invalid_proposer {
|
||||
trace!(target: "engine", "verify_block_external: bad proposer for step: {}", header_step);
|
||||
Err(EngineError::NotProposer(Mismatch { expected: correct_proposer, found: header.author().clone() }))?
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -655,26 +647,38 @@ impl Engine<EthereumMachine> for AuthorityRound {
|
||||
/// Check the number of seal fields.
|
||||
fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
|
||||
if header.number() >= self.validate_score_transition && *header.difficulty() >= U256::from(U128::max_value()) {
|
||||
Err(From::from(BlockError::DifficultyOutOfBounds(
|
||||
return Err(From::from(BlockError::DifficultyOutOfBounds(
|
||||
OutOfBounds { min: None, max: Some(U256::from(U128::max_value())), found: *header.difficulty() }
|
||||
)))
|
||||
} else {
|
||||
Ok(())
|
||||
)));
|
||||
}
|
||||
|
||||
// TODO [ToDr] Should this go from epoch manager?
|
||||
// If yes then probably benign reporting needs to be moved further in the verification.
|
||||
let set_number = header.number();
|
||||
|
||||
match verify_timestamp(&*self.step, header_step(header)?) {
|
||||
Err(BlockError::InvalidSeal) => {
|
||||
self.validators.report_benign(header.author(), set_number, header.number());
|
||||
Err(BlockError::InvalidSeal.into())
|
||||
}
|
||||
Err(e) => Err(e.into()),
|
||||
Ok(()) => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Do the step and gas limit validation.
|
||||
fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> {
|
||||
let step = header_step(header)?;
|
||||
|
||||
let parent_step = header_step(parent)?;
|
||||
// TODO [ToDr] Should this go from epoch manager?
|
||||
let set_number = header.number();
|
||||
|
||||
// Ensure header is from the step after parent.
|
||||
if step == parent_step
|
||||
|| (header.number() >= self.validate_step_transition && step <= parent_step) {
|
||||
trace!(target: "engine", "Multiple blocks proposed for step {}.", parent_step);
|
||||
|
||||
self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
|
||||
self.validators.report_malicious(header.author(), set_number, header.number(), Default::default());
|
||||
Err(EngineError::DoubleVote(header.author().clone()))?;
|
||||
}
|
||||
|
||||
@ -687,7 +691,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
|
||||
let skipped_primary = step_proposer(&*self.validators, &parent.hash(), s);
|
||||
// Do not report this signer.
|
||||
if skipped_primary != me {
|
||||
self.validators.report_benign(&skipped_primary, header.number(), header.number());
|
||||
self.validators.report_benign(&skipped_primary, set_number, header.number());
|
||||
}
|
||||
// Stop reporting once validators start repeating.
|
||||
if !reported.insert(skipped_primary) { break; }
|
||||
@ -702,9 +706,8 @@ impl Engine<EthereumMachine> for AuthorityRound {
|
||||
// fetch correct validator set for current epoch, taking into account
|
||||
// finality of previous transitions.
|
||||
let active_set;
|
||||
|
||||
let (validators, set_number) = if self.immediate_transitions {
|
||||
(&*self.validators, header.number())
|
||||
let validators = if self.immediate_transitions {
|
||||
&*self.validators
|
||||
} else {
|
||||
// get correct validator set for epoch.
|
||||
let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) {
|
||||
@ -722,21 +725,12 @@ impl Engine<EthereumMachine> for AuthorityRound {
|
||||
}
|
||||
|
||||
active_set = epoch_manager.validators().clone();
|
||||
(&active_set as &_, epoch_manager.epoch_transition_number)
|
||||
};
|
||||
|
||||
// always report with "self.validators" so that the report actually gets
|
||||
// to the contract.
|
||||
let report = |report| match report {
|
||||
Report::Benign(address, block_number) =>
|
||||
self.validators.report_benign(&address, set_number, block_number),
|
||||
Report::Malicious(address, block_number, proof) =>
|
||||
self.validators.report_malicious(&address, set_number, block_number, proof),
|
||||
&active_set as &_
|
||||
};
|
||||
|
||||
// verify signature against fixed list, but reports should go to the
|
||||
// contract itself.
|
||||
verify_external(header, validators, &*self.step, report)
|
||||
verify_external(header, validators)
|
||||
}
|
||||
|
||||
fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result<Vec<u8>, String> {
|
||||
@ -1059,8 +1053,7 @@ mod tests {
|
||||
assert!(engine.verify_block_family(&header, &parent_header).is_ok());
|
||||
assert!(engine.verify_block_external(&header).is_ok());
|
||||
header.set_seal(vec![encode(&5usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
|
||||
assert!(engine.verify_block_family(&header, &parent_header).is_ok());
|
||||
assert!(engine.verify_block_external(&header).is_err());
|
||||
assert!(engine.verify_block_basic(&header).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1200,3 +1193,4 @@ mod tests {
|
||||
AuthorityRound::new(params, machine).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -390,11 +390,6 @@ pub trait EthEngine: Engine<::machine::EthereumMachine> {
|
||||
self.machine().verify_transaction_basic(t, header)
|
||||
}
|
||||
|
||||
/// If this machine supports wasm.
|
||||
fn supports_wasm(&self) -> bool {
|
||||
self.machine().supports_wasm()
|
||||
}
|
||||
|
||||
/// Additional information.
|
||||
fn additional_params(&self) -> HashMap<String, String> {
|
||||
self.machine().additional_params()
|
||||
|
@ -27,7 +27,7 @@ mod params;
|
||||
|
||||
use std::sync::{Weak, Arc};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
|
||||
use std::collections::{HashSet, BTreeMap};
|
||||
use std::collections::HashSet;
|
||||
use hash::keccak;
|
||||
use bigint::prelude::{U128, U256};
|
||||
use bigint::hash::{H256, H520};
|
||||
@ -454,17 +454,6 @@ impl Engine<EthereumMachine> for Tendermint {
|
||||
|
||||
fn maximum_uncle_age(&self) -> usize { 0 }
|
||||
|
||||
/// Additional engine-specific information for the user/developer concerning `header`.
|
||||
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
|
||||
let message = ConsensusMessage::new_proposal(header).expect("Invalid header.");
|
||||
map![
|
||||
"signature".into() => message.signature.to_string(),
|
||||
"height".into() => message.vote_step.height.to_string(),
|
||||
"view".into() => message.vote_step.view.to_string(),
|
||||
"block_hash".into() => message.block_hash.as_ref().map(ToString::to_string).unwrap_or("".into())
|
||||
]
|
||||
}
|
||||
|
||||
fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
|
||||
// Chain scoring: total weight is sqrt(U256::max_value())*height - view
|
||||
let new_difficulty = U256::from(U128::max_value())
|
||||
|
@ -192,7 +192,7 @@ mod tests {
|
||||
header.set_number(2);
|
||||
header.set_parent_hash(client.chain_info().best_block_hash);
|
||||
// `reportBenign` when the designated proposer releases block from the future (bad clock).
|
||||
assert!(client.engine().verify_block_external(&header).is_err());
|
||||
assert!(client.engine().verify_block_basic(&header).is_err());
|
||||
// Seal a block.
|
||||
client.engine().step();
|
||||
assert_eq!(client.chain_info().best_block_number, 1);
|
||||
|
@ -29,7 +29,7 @@ use error::{BlockError, Error};
|
||||
use header::{Header, BlockNumber};
|
||||
use engines::{self, Engine};
|
||||
use ethjson;
|
||||
use rlp::{self, UntrustedRlp};
|
||||
use rlp::UntrustedRlp;
|
||||
use machine::EthereumMachine;
|
||||
use semantic_version::SemanticVersion;
|
||||
|
||||
@ -41,6 +41,38 @@ const MAX_SNAPSHOT_BLOCKS: u64 = 30000;
|
||||
|
||||
const DEFAULT_EIP649_DELAY: u64 = 3_000_000;
|
||||
|
||||
/// Ethash specific seal
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct Seal {
|
||||
/// Ethash seal mix_hash
|
||||
pub mix_hash: H256,
|
||||
/// Ethash seal nonce
|
||||
pub nonce: H64,
|
||||
}
|
||||
|
||||
impl Seal {
|
||||
/// Tries to parse rlp as ethash seal.
|
||||
pub fn parse_seal<T: AsRef<[u8]>>(seal: &[T]) -> Result<Self, Error> {
|
||||
if seal.len() != 2 {
|
||||
return Err(BlockError::InvalidSealArity(
|
||||
Mismatch {
|
||||
expected: 2,
|
||||
found: seal.len()
|
||||
}
|
||||
).into());
|
||||
}
|
||||
|
||||
let mix_hash = UntrustedRlp::new(seal[0].as_ref()).as_val::<H256>()?;
|
||||
let nonce = UntrustedRlp::new(seal[1].as_ref()).as_val::<H64>()?;
|
||||
let seal = Seal {
|
||||
mix_hash,
|
||||
nonce,
|
||||
};
|
||||
|
||||
Ok(seal)
|
||||
}
|
||||
}
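Since several verification paths below now funnel through `Seal::parse_seal`, a small hedged usage sketch may help (the RLP helpers mirror those used elsewhere in this diff; the values are placeholders):

```rust
// Sketch only: assumes the bigint and rlp crates already imported in this module.
use bigint::hash::{H256, H64};
use rlp::encode;

fn parse_seal_example() {
	// A well-formed Ethash seal carries exactly two RLP-encoded fields:
	// seal[0] = mix_hash (H256) and seal[1] = nonce (H64).
	let fields = vec![
		encode(&H256::default()).into_vec(),
		encode(&H64::default()).into_vec(),
	];
	let seal = Seal::parse_seal(&fields).expect("two well-formed fields parse");
	assert_eq!(seal.mix_hash, H256::default());
	assert_eq!(seal.nonce, H64::default());

	// Anything other than two fields is rejected with InvalidSealArity.
	assert!(Seal::parse_seal(&[Vec::<u8>::new()]).is_err());
}
```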
|
||||
|
||||
/// Ethash params.
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub struct EthashParams {
|
||||
@ -177,13 +209,12 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
|
||||
|
||||
/// Additional engine-specific information for the user/developer concerning `header`.
|
||||
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
|
||||
if header.seal().len() == self.seal_fields() {
|
||||
map![
|
||||
"nonce".to_owned() => format!("0x{}", header.nonce().hex()),
|
||||
"mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())
|
||||
]
|
||||
} else {
|
||||
BTreeMap::default()
|
||||
match Seal::parse_seal(header.seal()) {
|
||||
Ok(seal) => map![
|
||||
"nonce".to_owned() => format!("0x{}", seal.nonce.hex()),
|
||||
"mixHash".to_owned() => format!("0x{}", seal.mix_hash.hex())
|
||||
],
|
||||
_ => BTreeMap::default()
|
||||
}
|
||||
}
|
||||
|
||||
@ -269,13 +300,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
|
||||
|
||||
fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
|
||||
// check the seal fields.
|
||||
if header.seal().len() != self.seal_fields() {
|
||||
return Err(From::from(BlockError::InvalidSealArity(
|
||||
Mismatch { expected: self.seal_fields(), found: header.seal().len() }
|
||||
)));
|
||||
}
|
||||
UntrustedRlp::new(&header.seal()[0]).as_val::<H256>()?;
|
||||
UntrustedRlp::new(&header.seal()[1]).as_val::<H64>()?;
|
||||
let seal = Seal::parse_seal(header.seal())?;
|
||||
|
||||
// TODO: consider removing these lines.
|
||||
let min_difficulty = self.ethash_params.minimum_difficulty;
|
||||
@ -285,9 +310,10 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
|
||||
|
||||
let difficulty = Ethash::boundary_to_difficulty(&H256(quick_get_difficulty(
|
||||
&header.bare_hash().0,
|
||||
header.nonce().low_u64(),
|
||||
&header.mix_hash().0
|
||||
seal.nonce.low_u64(),
|
||||
&seal.mix_hash.0
|
||||
)));
|
||||
|
||||
if &difficulty < header.difficulty() {
|
||||
return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
|
||||
}
|
||||
@ -300,17 +326,20 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
|
||||
}
|
||||
|
||||
fn verify_block_unordered(&self, header: &Header) -> Result<(), Error> {
|
||||
if header.seal().len() != self.seal_fields() {
|
||||
return Err(From::from(BlockError::InvalidSealArity(
|
||||
Mismatch { expected: self.seal_fields(), found: header.seal().len() }
|
||||
)));
|
||||
}
|
||||
let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, header.nonce().low_u64());
|
||||
let seal = Seal::parse_seal(header.seal())?;
|
||||
|
||||
let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, seal.nonce.low_u64());
|
||||
let mix = H256(result.mix_hash);
|
||||
let difficulty = Ethash::boundary_to_difficulty(&H256(result.value));
|
||||
trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, H256(slow_hash_block_number(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), H256(result.mix_hash), H256(result.value));
|
||||
if mix != header.mix_hash() {
|
||||
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() })));
|
||||
trace!(target: "miner", "num: {num}, seed: {seed}, h: {h}, non: {non}, mix: {mix}, res: {res}",
|
||||
num = header.number() as u64,
|
||||
seed = H256(slow_hash_block_number(header.number() as u64)),
|
||||
h = header.bare_hash(),
|
||||
non = seal.nonce.low_u64(),
|
||||
mix = H256(result.mix_hash),
|
||||
res = H256(result.value));
|
||||
if mix != seal.mix_hash {
|
||||
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: seal.mix_hash })));
|
||||
}
|
||||
if &difficulty < header.difficulty() {
|
||||
return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
|
||||
@ -441,18 +470,6 @@ impl Ethash {
|
||||
}
|
||||
}
|
||||
|
||||
impl Header {
|
||||
/// Get the nonce field of the header.
|
||||
pub fn nonce(&self) -> H64 {
|
||||
rlp::decode(&self.seal()[1])
|
||||
}
|
||||
|
||||
/// Get the mix hash field of the header.
|
||||
pub fn mix_hash(&self) -> H256 {
|
||||
rlp::decode(&self.seal()[0])
|
||||
}
|
||||
}
|
||||
|
||||
fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256) {
|
||||
let eras = if block_number != 0 && block_number % era_rounds == 0 {
|
||||
block_number / era_rounds - 1
|
||||
@ -637,7 +654,7 @@ mod tests {
|
||||
#[test]
|
||||
fn can_do_seal_unordered_verification_fail() {
|
||||
let engine = test_spec().engine;
|
||||
let header: Header = Header::default();
|
||||
let header = Header::default();
|
||||
|
||||
let verify_result = engine.verify_block_unordered(&header);
|
||||
|
||||
@ -648,6 +665,17 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_do_seal_unordered_verification_fail2() {
|
||||
let engine = test_spec().engine;
|
||||
let mut header = Header::default();
|
||||
header.set_seal(vec![vec![], vec![]]);
|
||||
|
||||
let verify_result = engine.verify_block_unordered(&header);
|
||||
// rlp error, shouldn't panic
|
||||
assert!(verify_result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn can_do_seal256_verification_fail() {
|
||||
let engine = test_spec().engine;
|
||||
|
@ -141,6 +141,9 @@ pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(inclu
|
||||
/// Create a new Musicoin-MCIP3-era spec.
|
||||
pub fn new_mcip3_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) }
|
||||
|
||||
/// Create new Kovan spec with wasm activated at certain block
|
||||
pub fn new_kovan_wasm_test_machine() -> EthereumMachine { load_machine(include_bytes!("../../res/ethereum/kovan_wasm_test.json")) }
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bigint::prelude::U256;
|
||||
|
@ -24,23 +24,33 @@ use util::*;
|
||||
use bytes::{Bytes, BytesRef};
|
||||
use state::{Backend as StateBackend, State, Substate, CleanupMode};
|
||||
use machine::EthereumMachine as Machine;
|
||||
use vm::EnvInfo;
|
||||
use error::ExecutionError;
|
||||
use evm::{CallType, Factory, Finalize, FinalizationResult};
|
||||
use vm::{self, Ext, CreateContractAddress, ReturnData, CleanDustMode, ActionParams, ActionValue};
|
||||
use wasm;
|
||||
use evm::{CallType, Finalize, FinalizationResult};
|
||||
use vm::{
|
||||
self, Ext, EnvInfo, CreateContractAddress, ReturnData, CleanDustMode, ActionParams,
|
||||
ActionValue, Schedule,
|
||||
};
|
||||
use externalities::*;
|
||||
use trace::{self, Tracer, VMTracer};
|
||||
use transaction::{Action, SignedTransaction};
|
||||
use crossbeam;
|
||||
pub use executed::{Executed, ExecutionResult};
|
||||
|
||||
/// Roughly estimate what stack size each level of evm depth will use
|
||||
/// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132)
|
||||
/// Maybe something like here: `https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp`
|
||||
const STACK_SIZE_PER_DEPTH: usize = 24*1024;
|
||||
#[cfg(debug_assertions)]
|
||||
/// Roughly estimate what stack size each level of evm depth will use. (Debug build)
|
||||
const STACK_SIZE_PER_DEPTH: usize = 128 * 1024;
|
||||
|
||||
const WASM_MAGIC_NUMBER: &'static [u8; 4] = b"\0asm";
|
||||
#[cfg(not(debug_assertions))]
|
||||
/// Roughly estimate what stack size each level of evm depth will use.
|
||||
const STACK_SIZE_PER_DEPTH: usize = 24 * 1024;
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
/// Entry stack overhead prior to execution. (Debug build)
|
||||
const STACK_SIZE_ENTRY_OVERHEAD: usize = 100 * 1024;
|
||||
|
||||
#[cfg(not(debug_assertions))]
|
||||
/// Entry stack overhead prior to execution.
|
||||
const STACK_SIZE_ENTRY_OVERHEAD: usize = 20 * 1024;
|
||||
|
||||
/// Returns new address created from address, nonce, and code hash
|
||||
pub fn contract_address(address_scheme: CreateContractAddress, sender: &Address, nonce: &U256, code: &[u8]) -> (Address, Option<H256>) {
|
||||
@ -154,18 +164,6 @@ impl TransactOptions<trace::NoopTracer, trace::NoopVMTracer> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn executor(machine: &Machine, vm_factory: &Factory, params: &ActionParams) -> Box<vm::Vm> {
|
||||
if machine.supports_wasm() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) {
|
||||
Box::new(
|
||||
wasm::WasmInterpreter::new()
|
||||
// prefer to fail fast
|
||||
.expect("Failed to create wasm runtime")
|
||||
)
|
||||
} else {
|
||||
vm_factory.create(params.gas)
|
||||
}
|
||||
}
|
||||
|
||||
/// Transaction executor.
|
||||
pub struct Executive<'a, B: 'a + StateBackend> {
|
||||
state: &'a mut State<B>,
|
||||
@ -340,35 +338,35 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
|
||||
|
||||
fn exec_vm<T, V>(
|
||||
&mut self,
|
||||
schedule: Schedule,
|
||||
params: ActionParams,
|
||||
unconfirmed_substate: &mut Substate,
|
||||
output_policy: OutputPolicy,
|
||||
tracer: &mut T,
|
||||
vm_tracer: &mut V
|
||||
) -> vm::Result<FinalizationResult> where T: Tracer, V: VMTracer {
|
||||
|
||||
let depth_threshold = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get() / STACK_SIZE_PER_DEPTH);
|
||||
let local_stack_size = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get());
|
||||
let depth_threshold = local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH;
|
||||
let static_call = params.call_type == CallType::StaticCall;
|
||||
|
||||
// Ordinary execution - keep VM in same thread
|
||||
if (self.depth + 1) % depth_threshold != 0 {
|
||||
if self.depth != depth_threshold {
|
||||
let vm_factory = self.state.vm_factory();
|
||||
let mut ext = self.as_externalities(OriginInfo::from(¶ms), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call);
|
||||
trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call);
|
||||
return executor(self.machine, &vm_factory, ¶ms).exec(params, &mut ext).finalize(ext);
|
||||
let mut vm = vm_factory.create(¶ms, &schedule);
|
||||
return vm.exec(params, &mut ext).finalize(ext);
|
||||
}
|
||||
|
||||
// Start in new thread to reset stack
|
||||
// TODO [todr] No thread builder yet, so we need to reset once for a while
|
||||
// https://github.com/aturon/crossbeam/issues/16
|
||||
// Start in new thread with stack size needed up to max depth
|
||||
crossbeam::scope(|scope| {
|
||||
let machine = self.machine;
|
||||
let vm_factory = self.state.vm_factory();
|
||||
let mut ext = self.as_externalities(OriginInfo::from(¶ms), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call);
|
||||
|
||||
scope.spawn(move || {
|
||||
executor(machine, &vm_factory, ¶ms).exec(params, &mut ext).finalize(ext)
|
||||
})
|
||||
scope.builder().stack_size(::std::cmp::max(schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, local_stack_size)).spawn(move || {
|
||||
let mut vm = vm_factory.create(¶ms, &schedule);
|
||||
vm.exec(params, &mut ext).finalize(ext)
|
||||
}).expect("Sub-thread creation cannot fail; the host might run out of resources; qed")
|
||||
}).join()
|
||||
}
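The new stack budgeting above is easier to follow with concrete numbers; here is a hedged, standalone sketch of the same arithmetic (the 8 MiB local stack and `max_depth` of 1024 are illustrative assumptions, not values read from a running node):

```rust
// Release-build constants introduced earlier in this diff.
const STACK_SIZE_PER_DEPTH: usize = 24 * 1024;
const STACK_SIZE_ENTRY_OVERHEAD: usize = 20 * 1024;

fn main() {
	let local_stack_size = 8 * 1024 * 1024; // assumed current thread stack
	let max_depth = 1024;                   // assumed schedule.max_depth

	// How many nested VM frames still fit on the current thread's stack.
	let depth_threshold =
		local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH;
	assert_eq!(depth_threshold, 340); // (8 MiB - 20 KiB) / 24 KiB

	// Once that depth is reached, execution moves to a helper thread whose
	// stack covers the remaining depth, but is never smaller than the local stack.
	let spawned_stack = ::std::cmp::max(
		max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH,
		local_stack_size,
	);
	assert_eq!(spawned_stack, 684 * 24 * 1024); // roughly 16 MiB
}
```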
|
||||
|
||||
@ -477,7 +475,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
|
||||
let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("scope is conditional on params.code.is_some(); qed"));
|
||||
|
||||
let res = {
|
||||
self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::Return(output, trace_output.as_mut()), &mut subtracer, &mut subvmtracer)
|
||||
self.exec_vm(schedule, params, &mut unconfirmed_substate, OutputPolicy::Return(output, trace_output.as_mut()), &mut subtracer, &mut subvmtracer)
|
||||
};
|
||||
|
||||
vm_tracer.done_subtrace(subvmtracer);
|
||||
@ -568,9 +566,14 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
|
||||
|
||||
let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("two ways into create (Externalities::create and Executive::transact_with_tracer); both place `Some(...)` `code` in `params`; qed"));
|
||||
|
||||
let res = {
|
||||
self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract(output.as_mut().or(trace_output.as_mut())), &mut subtracer, &mut subvmtracer)
|
||||
};
|
||||
let res = self.exec_vm(
|
||||
schedule,
|
||||
params,
|
||||
&mut unconfirmed_substate,
|
||||
OutputPolicy::InitContract(output.as_mut().or(trace_output.as_mut())),
|
||||
&mut subtracer,
|
||||
&mut subvmtracer
|
||||
);
|
||||
|
||||
vm_tracer.done_subtrace(subvmtracer);
|
||||
|
||||
@ -1489,8 +1492,6 @@ mod tests {
|
||||
params.gas = U256::from(20025);
|
||||
params.code = Some(Arc::new(code));
|
||||
params.value = ActionValue::Transfer(U256::zero());
|
||||
let mut state = get_temp_state_with_factory(factory);
|
||||
state.add_balance(&sender, &U256::from_str("152d02c7e14af68000000").unwrap(), CleanupMode::NoEmpty).unwrap();
|
||||
let info = EnvInfo::default();
|
||||
let machine = ::ethereum::new_byzantium_test_machine();
|
||||
let mut substate = Substate::new();
|
||||
@ -1505,4 +1506,60 @@ mod tests {
|
||||
assert_eq!(output[..], returns[..]);
|
||||
assert_eq!(state.storage_at(&contract_address, &H256::from(&U256::zero())).unwrap(), H256::from(&U256::from(0)));
|
||||
}
|
||||
|
||||
fn wasm_sample_code() -> Arc<Vec<u8>> {
|
||||
Arc::new(
|
||||
"0061736d01000000010d0360027f7f0060017f0060000002270303656e7603726574000003656e760673656e646572000103656e76066d656d6f727902010110030201020404017000000501000708010463616c6c00020901000ac10101be0102057f017e4100410028020441c0006b22043602042004412c6a41106a220041003602002004412c6a41086a22014200370200200441186a41106a22024100360200200441186a41086a220342003703002004420037022c2004410036021c20044100360218200441186a1001200020022802002202360200200120032903002205370200200441106a2002360200200441086a200537030020042004290318220537022c200420053703002004411410004100200441c0006a3602040b0b0a010041040b0410c00000"
|
||||
.from_hex()
|
||||
.unwrap()
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wasm_activated_test() {
|
||||
let contract_address = Address::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap();
|
||||
let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
|
||||
|
||||
let mut state = get_temp_state();
|
||||
state.add_balance(&sender, &U256::from(10000000000u64), CleanupMode::NoEmpty).unwrap();
|
||||
state.commit().unwrap();
|
||||
|
||||
let mut params = ActionParams::default();
|
||||
params.origin = sender.clone();
|
||||
params.sender = sender.clone();
|
||||
params.address = contract_address.clone();
|
||||
params.gas = U256::from(20025);
|
||||
params.code = Some(wasm_sample_code());
|
||||
|
||||
let mut info = EnvInfo::default();
|
||||
|
||||
// 100 > 10
|
||||
info.number = 100;
|
||||
|
||||
// Network with wasm activated at block 10
|
||||
let machine = ::ethereum::new_kovan_wasm_test_machine();
|
||||
|
||||
let mut output = [0u8; 20];
|
||||
let FinalizationResult { gas_left: result, .. } = {
|
||||
let mut ex = Executive::new(&mut state, &info, &machine);
|
||||
ex.call(params.clone(), &mut Substate::new(), BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap()
|
||||
};
|
||||
|
||||
assert_eq!(result, U256::from(18433));
|
||||
// Transaction successfully returned sender
|
||||
assert_eq!(output[..], sender[..]);
|
||||
|
||||
// 1 < 10
|
||||
info.number = 1;
|
||||
|
||||
let mut output = [0u8; 20];
|
||||
let FinalizationResult { gas_left: result, .. } = {
|
||||
let mut ex = Executive::new(&mut state, &info, &machine);
|
||||
ex.call(params, &mut Substate::new(), BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap()
|
||||
};
|
||||
|
||||
assert_eq!(result, U256::from(20025));
|
||||
// Since transaction errored due to wasm was not activated, result is just empty
|
||||
assert_eq!(output[..], [0u8; 20][..]);
|
||||
}
|
||||
}
|
||||
|
@ -15,14 +15,44 @@
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use trie::TrieFactory;
|
||||
use evm::Factory as EvmFactory;
|
||||
use account_db::Factory as AccountFactory;
|
||||
use evm::{Factory as EvmFactory, VMType};
|
||||
use vm::{Vm, ActionParams, Schedule};
|
||||
use wasm::WasmInterpreter;
|
||||
|
||||
const WASM_MAGIC_NUMBER: &'static [u8; 4] = b"\0asm";
|
||||
|
||||
/// Virtual machine factory
|
||||
#[derive(Default, Clone)]
|
||||
pub struct VmFactory {
|
||||
evm: EvmFactory,
|
||||
}
|
||||
|
||||
impl VmFactory {
|
||||
pub fn create(&self, params: &ActionParams, schedule: &Schedule) -> Box<Vm> {
|
||||
if schedule.wasm.is_some() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) {
|
||||
Box::new(WasmInterpreter)
|
||||
} else {
|
||||
self.evm.create(¶ms.gas)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(evm: VMType, cache_size: usize) -> Self {
|
||||
VmFactory { evm: EvmFactory::new(evm, cache_size) }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<EvmFactory> for VmFactory {
|
||||
fn from(evm: EvmFactory) -> Self {
|
||||
VmFactory { evm: evm }
|
||||
}
|
||||
}
|
||||
|
||||
/// Collection of factories.
|
||||
#[derive(Default, Clone)]
|
||||
pub struct Factories {
|
||||
/// factory for evm.
|
||||
pub vm: EvmFactory,
|
||||
pub vm: VmFactory,
|
||||
/// factory for tries.
|
||||
pub trie: TrieFactory,
|
||||
/// factory for account databases.
|
||||
|
@ -259,7 +259,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
|
||||
&mut tracer,
|
||||
&mut vm_tracer,
|
||||
));
|
||||
let mut evm = vm_factory.create(params.gas);
|
||||
let mut evm = vm_factory.create(¶ms, &machine.schedule(0u64.into()));
|
||||
let res = evm.exec(params, &mut ex);
|
||||
// a return in finalize will not alter callcreates
|
||||
let callcreates = ex.callcreates.clone();
|
||||
|
@ -155,8 +155,8 @@ pub mod verification;
|
||||
pub mod views;
|
||||
|
||||
mod cache_manager;
|
||||
mod blooms;
|
||||
mod basic_types;
|
||||
mod blooms;
|
||||
mod pod_account;
|
||||
mod state_db;
|
||||
mod account_db;
|
||||
|
@ -264,15 +264,9 @@ impl EthereumMachine {
|
||||
} else if block_number < ext.eip150_transition {
|
||||
Schedule::new_homestead()
|
||||
} else {
|
||||
// There's no max_code_size transition so we tie it to eip161abc
|
||||
let max_code_size = if block_number >= ext.eip161abc_transition {
|
||||
self.params.max_code_size as usize
|
||||
} else {
|
||||
usize::max_value()
|
||||
};
|
||||
|
||||
let max_code_size = self.params.max_code_size(block_number);
|
||||
let mut schedule = Schedule::new_post_eip150(
|
||||
max_code_size,
|
||||
max_code_size as _,
|
||||
block_number >= ext.eip160_transition,
|
||||
block_number >= ext.eip161abc_transition,
|
||||
block_number >= ext.eip161d_transition
|
||||
@ -377,11 +371,6 @@ impl EthereumMachine {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// If this machine supports wasm.
|
||||
pub fn supports_wasm(&self) -> bool {
|
||||
self.params().wasm
|
||||
}
|
||||
|
||||
/// Additional params.
|
||||
pub fn additional_params(&self) -> HashMap<String, String> {
|
||||
hash_map![
|
||||
|
@ -57,6 +57,8 @@ pub enum Error {
|
||||
VersionNotSupported(u64),
|
||||
/// Max chunk size is to small to fit basic account data.
|
||||
ChunkTooSmall,
|
||||
/// Oversized chunk
|
||||
ChunkTooLarge,
|
||||
/// Snapshots not supported by the consensus engine.
|
||||
SnapshotsUnsupported,
|
||||
/// Bad epoch transition.
|
||||
@ -85,6 +87,7 @@ impl fmt::Display for Error {
|
||||
Error::Trie(ref err) => err.fmt(f),
|
||||
Error::VersionNotSupported(ref ver) => write!(f, "Snapshot version {} is not supprted.", ver),
|
||||
Error::ChunkTooSmall => write!(f, "Chunk size is too small."),
|
||||
Error::ChunkTooLarge => write!(f, "Chunk size is too large."),
|
||||
Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
|
||||
Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
|
||||
Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
|
||||
|
@ -77,6 +77,11 @@ mod traits;
|
||||
// Try to have chunks be around 4MB (before compression)
|
||||
const PREFERRED_CHUNK_SIZE: usize = 4 * 1024 * 1024;
|
||||
|
||||
// Maximal chunk size (decompressed)
|
||||
// Snappy::decompressed_len estimation may sometimes yield results greater
|
||||
// than PREFERRED_CHUNK_SIZE so allow some threshold here.
|
||||
const MAX_CHUNK_SIZE: usize = PREFERRED_CHUNK_SIZE / 4 * 5;
|
||||
|
||||
// Minimum supported state chunk version.
|
||||
const MIN_SUPPORTED_STATE_CHUNK_VERSION: u64 = 1;
|
||||
// current state chunk version.
|
||||
|
@ -23,7 +23,7 @@ use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
|
||||
use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService};
|
||||
use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService, MAX_CHUNK_SIZE};
|
||||
use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};
|
||||
|
||||
use blockchain::BlockChain;
|
||||
@ -130,6 +130,11 @@ impl Restoration {
|
||||
// feeds a state chunk, aborts early if `flag` becomes false.
|
||||
fn feed_state(&mut self, hash: H256, chunk: &[u8], flag: &AtomicBool) -> Result<(), Error> {
|
||||
if self.state_chunks_left.contains(&hash) {
|
||||
let expected_len = snappy::decompressed_len(chunk)?;
|
||||
if expected_len > MAX_CHUNK_SIZE {
|
||||
trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE);
|
||||
return Err(::snapshot::Error::ChunkTooLarge.into());
|
||||
}
|
||||
let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?;
|
||||
|
||||
self.state.feed(&self.snappy_buffer[..len], flag)?;
|
||||
@ -147,6 +152,11 @@ impl Restoration {
|
||||
// feeds a block chunk
|
||||
fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &EthEngine, flag: &AtomicBool) -> Result<(), Error> {
|
||||
if self.block_chunks_left.contains(&hash) {
|
||||
let expected_len = snappy::decompressed_len(chunk)?;
|
||||
if expected_len > MAX_CHUNK_SIZE {
|
||||
trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE);
|
||||
return Err(::snapshot::Error::ChunkTooLarge.into());
|
||||
}
|
||||
let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?;
|
||||
|
||||
self.secondary.feed(&self.snappy_buffer[..len], engine, flag)?;
|
||||
|
@ -19,7 +19,7 @@
|
||||
use devtools::RandomTempPath;
|
||||
use error::Error;
|
||||
|
||||
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
|
||||
use blockchain::generator::{BlockGenerator, BlockBuilder};
|
||||
use blockchain::BlockChain;
|
||||
use snapshot::{chunk_secondary, Error as SnapshotError, Progress, SnapshotComponents};
|
||||
use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
|
||||
@ -35,9 +35,10 @@ use std::sync::atomic::AtomicBool;
|
||||
const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { blocks: 30000, max_restore_blocks: 30000 };
|
||||
|
||||
fn chunk_and_restore(amount: u64) {
|
||||
let mut canon_chain = ChainGenerator::default();
|
||||
let mut finalizer = BlockFinalizer::default();
|
||||
let genesis = canon_chain.generate(&mut finalizer).unwrap();
|
||||
let genesis = BlockBuilder::genesis();
|
||||
let rest = genesis.add_blocks(amount as usize);
|
||||
let generator = BlockGenerator::new(vec![rest]);
|
||||
let genesis = genesis.last();
|
||||
|
||||
let engine = ::spec::Spec::new_test().engine;
|
||||
let new_path = RandomTempPath::create_dir();
|
||||
@ -45,13 +46,12 @@ fn chunk_and_restore(amount: u64) {
|
||||
snapshot_path.push("SNAP");
|
||||
|
||||
let old_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||
let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
|
||||
let bc = BlockChain::new(Default::default(), &genesis.encoded(), old_db.clone());
|
||||
|
||||
// build the blockchain.
|
||||
let mut batch = DBTransaction::new();
|
||||
for _ in 0..amount {
|
||||
let block = canon_chain.generate(&mut finalizer).unwrap();
|
||||
bc.insert_block(&mut batch, &block, vec![]);
|
||||
for block in generator {
|
||||
bc.insert_block(&mut batch, &block.encoded(), vec![]);
|
||||
bc.commit();
|
||||
}
|
||||
|
||||
@ -82,7 +82,7 @@ fn chunk_and_restore(amount: u64) {
|
||||
|
||||
// restore it.
|
||||
let new_db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||
let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
|
||||
let new_chain = BlockChain::new(Default::default(), &genesis.encoded(), new_db.clone());
|
||||
let mut rebuilder = SNAPSHOT_MODE.rebuilder(new_chain, new_db.clone(), &manifest).unwrap();
|
||||
|
||||
let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
|
||||
@ -97,15 +97,19 @@ fn chunk_and_restore(amount: u64) {
|
||||
drop(rebuilder);
|
||||
|
||||
// and test it.
|
||||
let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
|
||||
let new_chain = BlockChain::new(Default::default(), &genesis.encoded(), new_db);
|
||||
assert_eq!(new_chain.best_block_hash(), best_hash);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chunk_and_restore_500() { chunk_and_restore(500) }
|
||||
fn chunk_and_restore_500() {
|
||||
chunk_and_restore(500)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chunk_and_restore_40k() { chunk_and_restore(40000) }
|
||||
fn chunk_and_restore_4k() {
|
||||
chunk_and_restore(4000)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn checks_flag() {
|
||||
@ -120,17 +124,12 @@ fn checks_flag() {
|
||||
|
||||
stream.append_empty_data().append_empty_data();
|
||||
|
||||
let genesis = {
|
||||
let mut canon_chain = ChainGenerator::default();
|
||||
let mut finalizer = BlockFinalizer::default();
|
||||
canon_chain.generate(&mut finalizer).unwrap()
|
||||
};
|
||||
|
||||
let genesis = BlockBuilder::genesis();
|
||||
let chunk = stream.out();
|
||||
|
||||
let db = Arc::new(kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap_or(0)));
|
||||
let engine = ::spec::Spec::new_test().engine;
|
||||
let chain = BlockChain::new(Default::default(), &genesis, db.clone());
|
||||
let chain = BlockChain::new(Default::default(), &genesis.last().encoded(), db.clone());
|
||||
|
||||
let manifest = ::snapshot::ManifestData {
|
||||
version: 2,
|
||||
|
@ -110,8 +110,8 @@ pub struct CommonParams {
|
||||
pub nonce_cap_increment: u64,
|
||||
/// Enable dust cleanup for contracts.
|
||||
pub remove_dust_contracts: bool,
|
||||
/// Wasm support
|
||||
pub wasm: bool,
|
||||
/// Wasm activation blocknumber, if any disabled initially.
|
||||
pub wasm_activation_transition: BlockNumber,
|
||||
/// Gas limit bound divisor (how much gas limit can change per block)
|
||||
pub gas_limit_bound_divisor: U256,
|
||||
/// Registrar contract address.
|
||||
@ -120,6 +120,8 @@ pub struct CommonParams {
|
||||
pub node_permission_contract: Option<Address>,
|
||||
/// Maximum contract code size that can be deployed.
|
||||
pub max_code_size: u64,
|
||||
/// Number of first block where max code size limit is active.
|
||||
pub max_code_size_transition: BlockNumber,
|
||||
/// Transaction permission managing contract address.
|
||||
pub transaction_permission_contract: Option<Address>,
|
||||
}
|
||||
@ -127,11 +129,20 @@ pub struct CommonParams {
|
||||
impl CommonParams {
|
||||
/// Schedule for an EVM in the post-EIP-150-era of the Ethereum main net.
|
||||
pub fn schedule(&self, block_number: u64) -> ::vm::Schedule {
|
||||
let mut schedule = ::vm::Schedule::new_post_eip150(self.max_code_size as _, true, true, true);
|
||||
let mut schedule = ::vm::Schedule::new_post_eip150(self.max_code_size(block_number) as _, true, true, true);
|
||||
self.update_schedule(block_number, &mut schedule);
|
||||
schedule
|
||||
}
|
||||
|
||||
/// Returns max code size at given block.
|
||||
pub fn max_code_size(&self, block_number: u64) -> u64 {
|
||||
if block_number >= self.max_code_size_transition {
|
||||
self.max_code_size
|
||||
} else {
|
||||
u64::max_value()
|
||||
}
|
||||
}
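A self-contained sketch of the transition rule added here (the struct and the numbers are illustrative stand-ins; only the `max_code_size` logic mirrors the diff):

```rust
// Hypothetical stand-in for CommonParams, reduced to the two relevant fields.
struct Params {
	max_code_size: u64,
	max_code_size_transition: u64,
}

impl Params {
	/// Unlimited before the transition block, the configured cap at and after it.
	fn max_code_size(&self, block_number: u64) -> u64 {
		if block_number >= self.max_code_size_transition {
			self.max_code_size
		} else {
			u64::max_value()
		}
	}
}

fn main() {
	// Illustrative values: a 24 KiB cap activating at block 2_675_000.
	let p = Params { max_code_size: 24 * 1024, max_code_size_transition: 2_675_000 };
	assert_eq!(p.max_code_size(0), u64::max_value());
	assert_eq!(p.max_code_size(2_675_000), 24 * 1024);
}
```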
|
||||
|
||||
/// Apply common spec config parameters to the schedule.
|
||||
pub fn update_schedule(&self, block_number: u64, schedule: &mut ::vm::Schedule) {
|
||||
schedule.have_create2 = block_number >= self.eip86_transition;
|
||||
@ -147,6 +158,9 @@ impl CommonParams {
|
||||
false => ::vm::CleanDustMode::BasicOnly,
|
||||
};
|
||||
}
|
||||
if block_number >= self.wasm_activation_transition {
|
||||
schedule.wasm = Some(Default::default());
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether these params contain any bug-fix hard forks.
|
||||
@ -221,12 +235,16 @@ impl From<ethjson::spec::Params> for CommonParams {
|
||||
),
|
||||
nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into),
|
||||
remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false),
|
||||
wasm: p.wasm.unwrap_or(false),
|
||||
gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(),
|
||||
registrar: p.registrar.map_or_else(Address::new, Into::into),
|
||||
node_permission_contract: p.node_permission_contract.map(Into::into),
|
||||
max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into),
|
||||
max_code_size_transition: p.max_code_size_transition.map_or(0, Into::into),
|
||||
transaction_permission_contract: p.transaction_permission_contract.map(Into::into),
|
||||
wasm_activation_transition: p.wasm_activation_transition.map_or(
|
||||
BlockNumber::max_value(),
|
||||
Into::into
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ use executed::{Executed, ExecutionError};
use types::state_diff::StateDiff;
use transaction::SignedTransaction;
use state_db::StateDB;
use evm::{Factory as EvmFactory};
use factory::VmFactory;

use bigint::prelude::U256;
use bigint::hash::H256;
@ -376,7 +376,7 @@ impl<B: Backend> State<B> {
	}

	/// Get a VM factory that can execute on this state.
	pub fn vm_factory(&self) -> EvmFactory {
	pub fn vm_factory(&self) -> VmFactory {
		self.factories.vm.clone()
	}

@ -833,16 +833,26 @@ impl<B: Backend> State<B> {
		}))
	}

	fn query_pod(&mut self, query: &PodState) -> trie::Result<()> {
		for (address, pod_account) in query.get() {
	// Return a list of all touched addresses in cache.
	fn touched_addresses(&self) -> Vec<Address> {
		assert!(self.checkpoints.borrow().is_empty());
		self.cache.borrow().iter().map(|(add, _)| *add).collect()
	}

	fn query_pod(&mut self, query: &PodState, touched_addresses: &[Address]) -> trie::Result<()> {
		let pod = query.get();

		for address in touched_addresses {
			if !self.ensure_cached(address, RequireCache::Code, true, |a| a.is_some())? {
				continue
			}

			// needs to be split into two parts for the refcell code here
			// to work.
			for key in pod_account.storage.keys() {
				self.storage_at(address, key)?;
			if let Some(pod_account) = pod.get(address) {
				// needs to be split into two parts for the refcell code here
				// to work.
				for key in pod_account.storage.keys() {
					self.storage_at(address, key)?;
				}
			}
		}

@ -852,9 +862,10 @@ impl<B: Backend> State<B> {
	/// Returns a `StateDiff` describing the difference from `orig` to `self`.
	/// Consumes self.
	pub fn diff_from<X: Backend>(&self, orig: State<X>) -> trie::Result<StateDiff> {
		let addresses_post = self.touched_addresses();
		let pod_state_post = self.to_pod();
		let mut state_pre = orig;
		state_pre.query_pod(&pod_state_post)?;
		state_pre.query_pod(&pod_state_post, &addresses_post)?;
		Ok(pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post))
	}

@ -1406,7 +1417,7 @@ mod tests {
	}

	#[test]
	fn should_not_trace_delegatecall() {
	fn should_trace_delegatecall_properly() {
		init_log();

		let mut state = get_temp_state();
@ -1426,7 +1437,7 @@ mod tests {
		}.sign(&secret(), None);

		state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap();
		state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap();
		state.init_code(&0xb.into(), FromHex::from_hex("60056000526001601ff3").unwrap()).unwrap();
		let result = state.apply(&info, &machine, &t, true).unwrap();

		let expected_trace = vec![FlatTrace {
@ -1441,23 +1452,23 @@ mod tests {
				call_type: CallType::Call,
			}),
			result: trace::Res::Call(trace::CallResult {
				gas_used: U256::from(721), // in post-eip150
				gas_used: U256::from(736), // in post-eip150
				output: vec![]
			}),
		}, FlatTrace {
			trace_address: vec![0].into_iter().collect(),
			subtraces: 0,
			action: trace::Action::Call(trace::Call {
				from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
				to: 0xa.into(),
				from: 0xa.into(),
				to: 0xb.into(),
				value: 0.into(),
				gas: 32768.into(),
				input: vec![],
				call_type: CallType::DelegateCall,
			}),
			result: trace::Res::Call(trace::CallResult {
				gas_used: 3.into(),
				output: vec![],
				gas_used: 18.into(),
				output: vec![5],
			}),
		}];

@ -2182,4 +2193,37 @@ mod tests {
		assert!(state.exists(&d).unwrap());
		assert!(!state.exists(&e).unwrap());
	}

	#[test]
	fn should_trace_diff_suicided_accounts() {
		use pod_account;

		let a = 10.into();
		let db = get_temp_state_db();
		let (root, db) = {
			let mut state = State::new(db, U256::from(0), Default::default());
			state.add_balance(&a, &100.into(), CleanupMode::ForceCreate).unwrap();
			state.commit().unwrap();
			state.drop()
		};

		let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
		let original = state.clone();
		state.kill_account(&a);

		assert_eq!(original.touched_addresses(), vec![]);
		assert_eq!(state.touched_addresses(), vec![a]);

		let diff = state.diff_from(original).unwrap();
		let diff_map = diff.get();
		assert_eq!(diff_map.len(), 1);
		assert!(diff_map.get(&a).is_some());
		assert_eq!(diff_map.get(&a),
			pod_account::diff_pod(Some(&PodAccount {
				balance: U256::from(100),
				nonce: U256::zero(),
				code: Some(Default::default()),
				storage: Default::default()
			}), None).as_ref());
	}
}
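The new `touched_addresses`/`query_pod` pair keeps `diff_from` from walking the entire post-state pod: only addresses that actually appear in the post-state cache are pulled into the pre-state before the two pods are diffed, which is what lets the suicided-account test above report the killed account. A toy sketch of the same idea over plain maps (the types here are stand-ins, not Parity's `PodState`):

use std::collections::BTreeMap;

type Address = u64;
type Account = u64; // stand-in for a full account pod

// Build a "pre vs post" diff, but only for the addresses the transaction touched.
fn diff_touched(
    pre: &BTreeMap<Address, Account>,
    post: &BTreeMap<Address, Account>,
    touched: &[Address],
) -> BTreeMap<Address, (Option<Account>, Option<Account>)> {
    let mut out = BTreeMap::new();
    for addr in touched {
        let before = pre.get(addr).copied();
        let after = post.get(addr).copied();
        if before != after {
            out.insert(*addr, (before, after));
        }
    }
    out
}

fn main() {
    let pre: BTreeMap<_, _> = vec![(10, 100)].into_iter().collect();
    let post = BTreeMap::new(); // account 10 was killed
    let diff = diff_touched(&pre, &post, &[10]);
    assert_eq!(diff.get(&10), Some(&(Some(100), None)));
}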
@ -275,7 +275,7 @@ pub fn get_temp_state() -> State<::state_db::StateDB> {
pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> {
	let journal_db = get_temp_state_db();
	let mut factories = Factories::default();
	factories.vm = factory;
	factories.vm = factory.into();
	State::new(journal_db, U256::from(0), factories)
}
@ -1,77 +0,0 @@
use bloomchain::Bloom;
use bloomchain::group::{BloomGroup, GroupPosition};
use basic_types::LogBloom;

/// Helper structure representing bloom of the trace.
#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
pub struct BlockTracesBloom(LogBloom);

impl From<LogBloom> for BlockTracesBloom {
	fn from(bloom: LogBloom) -> BlockTracesBloom {
		BlockTracesBloom(bloom)
	}
}

impl From<Bloom> for BlockTracesBloom {
	fn from(bloom: Bloom) -> BlockTracesBloom {
		let bytes: [u8; 256] = bloom.into();
		BlockTracesBloom(LogBloom::from(bytes))
	}
}

impl Into<Bloom> for BlockTracesBloom {
	fn into(self) -> Bloom {
		let log = self.0;
		Bloom::from(log.0)
	}
}

/// Represents group of X consecutive blooms.
#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
pub struct BlockTracesBloomGroup {
	blooms: Vec<BlockTracesBloom>,
}

impl From<BloomGroup> for BlockTracesBloomGroup {
	fn from(group: BloomGroup) -> Self {
		let blooms = group.blooms
			.into_iter()
			.map(From::from)
			.collect();

		BlockTracesBloomGroup {
			blooms: blooms
		}
	}
}

impl Into<BloomGroup> for BlockTracesBloomGroup {
	fn into(self) -> BloomGroup {
		let blooms = self.blooms
			.into_iter()
			.map(Into::into)
			.collect();

		BloomGroup {
			blooms: blooms
		}
	}
}

/// Represents `BloomGroup` position in database.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
pub struct TraceGroupPosition {
	/// Bloom level.
	pub level: u8,
	/// Group index.
	pub index: u32,
}

impl From<GroupPosition> for TraceGroupPosition {
	fn from(p: GroupPosition) -> Self {
		TraceGroupPosition {
			level: p.level as u8,
			index: p.index as u32,
		}
	}
}
@ -279,7 +279,6 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
			} else {
				self.traces(block_hash).expect("Traces database is incomplete.").bloom()
			})
			.map(blooms::Bloom::from)
			.map(Into::into)
			.collect();
@ -16,7 +16,6 @@

//! Tracing

mod bloom;
mod config;
mod db;
mod executive_tracer;
@ -17,12 +17,13 @@
//! Trace filters type definitions

use std::ops::Range;
use bloomchain::{Filter as BloomFilter, Bloom, Number};
use hash::keccak;
use util::Address;
use bloomable::Bloomable;
use basic_types::LogBloom;
use bloomable::Bloomable;
use bloomchain::{Filter as BloomFilter, Number, Bloom};
use hash::keccak;
use trace::flat::FlatTrace;
use util::Address;

use super::trace::{Action, Res};

/// Addresses filter.
@ -90,9 +91,6 @@ pub struct Filter {
impl BloomFilter for Filter {
	fn bloom_possibilities(&self) -> Vec<Bloom> {
		self.bloom_possibilities()
			.into_iter()
			.map(|b| Bloom::from(b.0))
			.collect()
	}

	fn range(&self) -> Range<Number> {
@ -74,13 +74,23 @@ pub struct Call {

impl From<ActionParams> for Call {
	fn from(p: ActionParams) -> Self {
		Call {
			from: p.sender,
			to: p.address,
			value: p.value.value(),
			gas: p.gas,
			input: p.data.unwrap_or_else(Vec::new),
			call_type: p.call_type,
		match p.call_type {
			CallType::DelegateCall => Call {
				from: p.address,
				to: p.code_address,
				value: p.value.value(),
				gas: p.gas,
				input: p.data.unwrap_or_else(Vec::new),
				call_type: p.call_type,
			},
			_ => Call {
				from: p.sender,
				to: p.address,
				value: p.value.value(),
				gas: p.gas,
				input: p.data.unwrap_or_else(Vec::new),
				call_type: p.call_type,
			},
		}
	}
}
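The rewritten `From<ActionParams>` distinguishes delegate calls: for `CallType::DelegateCall` the trace's `from` is the executing contract (`p.address`) and `to` is the contract whose code runs (`p.code_address`), instead of the plain sender/address pair. A small self-contained sketch of that mapping, with simplified stand-in types:

#[derive(Debug, PartialEq, Clone, Copy)]
enum CallType { Call, DelegateCall }

#[derive(Debug, PartialEq)]
struct TraceCall { from: u64, to: u64, call_type: CallType }

// Stand-in for ActionParams: sender, address (executing contract), code_address.
struct Params { sender: u64, address: u64, code_address: u64, call_type: CallType }

impl From<Params> for TraceCall {
    fn from(p: Params) -> Self {
        match p.call_type {
            // Delegate call: report the executing contract and the code source.
            CallType::DelegateCall => TraceCall { from: p.address, to: p.code_address, call_type: p.call_type },
            // Everything else keeps the plain sender -> address pair.
            _ => TraceCall { from: p.sender, to: p.address, call_type: p.call_type },
        }
    }
}

fn main() {
    let p = Params { sender: 1, address: 0xa, code_address: 0xb, call_type: CallType::DelegateCall };
    assert_eq!(TraceCall::from(p), TraceCall { from: 0xa, to: 0xb, call_type: CallType::DelegateCall });

    let q = Params { sender: 1, address: 0xa, code_address: 0xa, call_type: CallType::Call };
    assert_eq!(TraceCall::from(q).from, 1);
}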
@ -70,7 +70,7 @@ pub mod blocks {
	use super::{Kind, BlockLike};

	use engines::EthEngine;
	use error::Error;
	use error::{Error, BlockError};
	use header::Header;
	use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered};

@ -90,6 +90,10 @@ pub mod blocks {
		fn create(input: Self::Input, engine: &EthEngine) -> Result<Self::Unverified, Error> {
			match verify_block_basic(&input.header, &input.bytes, engine) {
				Ok(()) => Ok(input),
				Err(Error::Block(BlockError::TemporarilyInvalid(oob))) => {
					debug!(target: "client", "Block received too early {}: {:?}", input.hash(), oob);
					Err(BlockError::TemporarilyInvalid(oob).into())
				},
				Err(e) => {
					warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e);
					Err(e)
@ -28,7 +28,7 @@ use client::BlockChainClient;
use engines::EthEngine;
use error::{BlockError, Error};
use header::{BlockNumber, Header};
use transaction::SignedTransaction;
use transaction::{SignedTransaction, UnverifiedTransaction};
use views::BlockView;

use bigint::hash::H256;
@ -69,11 +69,9 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &EthEngine) ->
		verify_header_params(&u, engine, false)?;
		engine.verify_block_basic(&u)?;
	}
	// Verify transactions.
	// TODO: either use transaction views or cache the decoded transactions.
	let v = BlockView::new(bytes);
	for t in v.transactions() {
		engine.verify_transaction_basic(&t, &header)?;

	for t in UntrustedRlp::new(bytes).at(1)?.iter().map(|rlp| rlp.as_val::<UnverifiedTransaction>()) {
		engine.verify_transaction_basic(&t?, &header)?;
	}
	Ok(())
}
@ -355,6 +353,7 @@ mod tests {
	use types::log_entry::{LogEntry, LocalizedLogEntry};
	use time::get_time;
	use encoded;
	use rlp;

	fn check_ok(result: Result<(), Error>) {
		result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e));
@ -508,6 +507,27 @@ mod tests {
		Ok(())
	}

	#[test]
	fn test_verify_block_basic_with_invalid_transactions() {
		let spec = Spec::new_test();
		let engine = &*spec.engine;

		let block = {
			let mut rlp = rlp::RlpStream::new_list(3);
			let mut header = Header::default();
			// that's an invalid transaction list rlp
			let invalid_transactions = vec![vec![0u8]];
			header.set_transactions_root(ordered_trie_root(invalid_transactions.clone()));
			header.set_gas_limit(engine.params().min_gas_limit);
			rlp.append(&header);
			rlp.append_list::<Vec<u8>, _>(&invalid_transactions);
			rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
			rlp.out()
		};

		assert!(basic_test(&block, engine).is_err());
	}

	#[test]
	fn test_verify_block() {
		use rlp::RlpStream;
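The change to `verify_block_basic` decodes each transaction straight from the block body RLP and propagates a decoding failure as a verification error, rather than trusting the block view to hold a well-formed list, which is what the new invalid-transactions test exercises. A toy sketch of the fail-fast pattern (the decoder here is a hypothetical stand-in, not the rlp crate's API):

// Decode every transaction in a block body and fail on the first bad one.
fn decode(raw: &[u8]) -> Result<u8, String> {
    raw.first().copied().ok_or_else(|| "empty transaction item".to_owned())
}

fn verify_all(raw_txs: &[Vec<u8>]) -> Result<(), String> {
    for raw in raw_txs {
        let _tx = decode(raw)?; // propagate the decoding error instead of panicking later
    }
    Ok(())
}

fn main() {
    assert!(verify_all(&[vec![1], vec![2]]).is_ok());
    assert!(verify_all(&[vec![1], vec![]]).is_err());
}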
@ -12,6 +12,7 @@ ethcore-util = { path = "../../util" }
ethcore-bigint = { path = "../../util/bigint" }
ethjson = { path = "../../json" }
bloomable = { path = "../../util/bloomable" }
ethbloom = "0.2.5"
keccak-hash = { path = "../../util/hash" }
heapsize = "0.4"
@ -17,7 +17,7 @@
//! Blockchain filter

use util::Address;
use bigint::hash::{H256, H2048};
use bigint::hash::{H256, H2048 as Bloom};
use bloomable::Bloomable;
use ids::BlockId;
use log_entry::LogEntry;
@ -75,15 +75,15 @@ impl Clone for Filter {

impl Filter {
	/// Returns combinations of each address and topic.
	pub fn bloom_possibilities(&self) -> Vec<H2048> {
	pub fn bloom_possibilities(&self) -> Vec<Bloom> {
		let blooms = match self.address {
			Some(ref addresses) if !addresses.is_empty() =>
				addresses.iter().map(|ref address| {
					let mut bloom = H2048::default();
					let mut bloom = Bloom::default();
					bloom.shift_bloomed(&keccak(address));
					bloom
				}).collect(),
			_ => vec![H2048::default()]
			_ => vec![Bloom::default()]
		};

		self.topics.iter().fold(blooms, |bs, topic| match *topic {
@ -93,7 +93,7 @@ impl Filter {
					let mut b = bloom.clone();
					b.shift_bloomed(&keccak(topic));
					b
				}).collect::<Vec<H2048>>()
				}).collect::<Vec<Bloom>>()
			}).collect()
		})
	}
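`bloom_possibilities` builds one bloom per filtered address and then folds each topic position in, so the result is every address/topic combination that could match a log. A miniature version using a u64 as a stand-in bloom and a trivial bit-set "hash" shows the combinatorial fold:

// Stand-in "bloom": set one bit per item. Not a real 2048-bit bloom filter.
fn shift_bloomed(bloom: u64, item: u64) -> u64 {
    bloom | (1u64 << (item % 64))
}

fn bloom_possibilities(addresses: &[u64], topics: &[Vec<u64>]) -> Vec<u64> {
    // Start with one bloom per address (or a single empty bloom).
    let base: Vec<u64> = if addresses.is_empty() {
        vec![0]
    } else {
        addresses.iter().map(|a| shift_bloomed(0, *a)).collect()
    };
    // Fold each topic position in: every existing bloom is combined with every option.
    topics.iter().fold(base, |bs, options| {
        bs.iter()
            .flat_map(|b| options.iter().map(move |t| shift_bloomed(*b, *t)))
            .collect()
    })
}

fn main() {
    // 2 addresses x 2 options for one topic = 4 possibilities.
    assert_eq!(bloom_possibilities(&[1, 2], &[vec![10, 11]]).len(), 4);
}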
@ -24,6 +24,7 @@ extern crate rlp;
#[macro_use]
extern crate rlp_derive;
extern crate bloomable;
extern crate ethbloom;
extern crate keccak_hash as hash;
extern crate heapsize;
@ -38,7 +38,7 @@ pub mod tests;
pub use action_params::{ActionParams, ActionValue, ParamsType};
pub use call_type::CallType;
pub use env_info::{EnvInfo, LastHashes};
pub use schedule::{Schedule, CleanDustMode};
pub use schedule::{Schedule, CleanDustMode, WasmCosts};
pub use ext::{Ext, MessageCallResult, ContractCreateResult, CreateContractAddress};
pub use return_data::{ReturnData, GasLeft};
pub use error::{Error, Result};
@ -113,47 +113,53 @@ pub struct Schedule {
	pub kill_dust: CleanDustMode,
	/// Enable EIP-86 rules
	pub eip86: bool,
	/// Wasm extra schedule settings
	pub wasm: WasmCosts,
	/// Wasm extra schedule settings, if wasm activated
	pub wasm: Option<WasmCosts>,
}

/// Wasm cost table
pub struct WasmCosts {
	/// Arena allocator cost, per byte
	pub alloc: u32,
	/// Default opcode cost
	pub regular: u32,
	/// Div operations multiplier.
	pub div: u32,
	/// Div operations multiplier.
	pub mul: u32,
	/// Memory (load/store) operations multiplier.
	pub mem: u32,
	/// Memory copy operation, per byte.
	pub mem_copy: u32,
	/// Memory move operation, per byte.
	pub mem_move: u32,
	/// Memory set operation, per byte.
	pub mem_set: u32,
	/// Static region charge, per byte.
	pub static_region: u32,
	/// General static query of U256 value from env-info
	pub static_u256: u32,
	/// General static query of Address value from env-info
	pub static_address: u32,
	/// Memory stipend. Amount of free memory (in 64kb pages) each contract can use for stack.
	pub initial_mem: u32,
	/// Grow memory cost, per page (64kb)
	pub grow_mem: u32,
	/// Memory copy cost, per byte
	pub memcpy: u32,
	/// Max stack height (native WebAssembly stack limiter)
	pub max_stack_height: u32,
	/// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div`
	pub opcodes_mul: u32,
	/// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div`
	pub opcodes_div: u32,
}

impl Default for WasmCosts {
	fn default() -> Self {
		WasmCosts {
			alloc: 2,
			regular: 1,
			div: 16,
			mul: 4,
			mem: 2,
			mem_copy: 1,
			mem_move: 1,
			mem_set: 1,
			static_region: 1,
			static_u256: 64,
			static_address: 40,
			initial_mem: 4096,
			grow_mem: 8192,
			memcpy: 1,
			max_stack_height: 64*1024,
			opcodes_mul: 3,
			opcodes_div: 8,
		}
	}
}
@ -230,7 +236,7 @@ impl Schedule {
			have_static_call: false,
			kill_dust: CleanDustMode::Off,
			eip86: false,
			wasm: Default::default(),
			wasm: None,
		}
	}

@ -293,9 +299,17 @@ impl Schedule {
			have_static_call: false,
			kill_dust: CleanDustMode::Off,
			eip86: false,
			wasm: Default::default(),
			wasm: None,
		}
	}

	/// Returns wasm schedule
	///
	/// May panic if there is no wasm schedule
	pub fn wasm(&self) -> &WasmCosts {
		// *** Prefer PANIC here instead of silently breaking consensus! ***
		self.wasm.as_ref().expect("Wasm schedule expected to exist while checking wasm contract. Misconfigured client?")
	}
}

impl Default for Schedule {
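With `wasm` now an `Option<WasmCosts>`, callers go through `Schedule::wasm()`, and wasm gas is scaled by the `opcodes_mul`/`opcodes_div` pair (the per-table-entry cost ratio documented above; the interpreter later divides gas on the way in and multiplies it back on the way out). A quick sketch of that two-way adjustment, assuming the default 3/8 ratio and plain u64 arithmetic instead of U256:

// Sketch of the gas scaling only; the real code uses U256 and checks for overflow.
fn to_wasm_gas(evm_gas: u64, opcodes_mul: u64, opcodes_div: u64) -> u64 {
    evm_gas * opcodes_div / opcodes_mul
}

fn to_evm_gas(wasm_gas: u64, opcodes_mul: u64, opcodes_div: u64) -> u64 {
    wasm_gas * opcodes_mul / opcodes_div
}

fn main() {
    let (mul, div) = (3, 8); // defaults from WasmCosts above
    let adjusted = to_wasm_gas(90_000, mul, div);
    assert_eq!(adjusted, 240_000);
    // Converting back recovers the original amount (modulo integer division).
    assert_eq!(to_evm_gas(adjusted, mul, div), 90_000);
}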
@ -78,15 +78,23 @@ pub fn test_finalize(res: Result<GasLeft>) -> Result<U256> {
}

impl FakeExt {
	/// New fake externalities
	pub fn new() -> Self {
		FakeExt::default()
	}

	/// New fake externalities with byzantium schedule rules
	pub fn new_byzantium() -> Self {
		let mut ext = FakeExt::default();
		ext.schedule = Schedule::new_byzantium();
		ext
	}

	/// Alter fake externalities to allow wasm
	pub fn with_wasm(mut self) -> Self {
		self.schedule.wasm = Some(Default::default());
		self
	}
}

impl Ext for FakeExt {
@ -8,7 +8,9 @@ byteorder = "1.0"
ethcore-util = { path = "../../util" }
ethcore-bigint = { path = "../../util/bigint" }
log = "0.3"
parity-wasm = "0.15"
wasm-utils = { git = "https://github.com/paritytech/wasm-utils" }
parity-wasm = "0.27"
libc = "0.2"
pwasm-utils = "0.1"
vm = { path = "../vm" }
ethcore-logger = { path = "../../logger" }
wasmi = { version = "0.1.3", features = ["opt-in-32bit"] }
@ -16,7 +16,7 @@ fn load_code<P: AsRef<path::Path>>(p: P) -> io::Result<Vec<u8>> {
}

fn wasm_interpreter() -> WasmInterpreter {
	WasmInterpreter::new().expect("wasm interpreter to create without errors")
	WasmInterpreter
}

#[derive(Debug)]
@ -130,7 +130,7 @@ pub fn run_fixture(fixture: &Fixture) -> Vec<Fail> {
		Err(e) => { return Fail::load(e); },
	};

	let mut ext = FakeExt::new();
	let mut ext = FakeExt::new().with_wasm();
	params.code = Some(Arc::new(
		if let Source::Constructor { ref arguments, ref sender, ref at, .. } = fixture.source {
			match construct(&mut ext, source, arguments.clone().into(), sender.clone().into(), at.clone().into()) {
@ -14,178 +14,285 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Wasm env module bindings
|
||||
//! Env module glue for wasmi interpreter
|
||||
|
||||
use parity_wasm::elements::ValueType::*;
|
||||
use parity_wasm::interpreter::{self, UserFunctionDescriptor};
|
||||
use parity_wasm::interpreter::UserFunctionDescriptor::*;
|
||||
use super::runtime::{Runtime, UserTrap};
|
||||
use std::cell::RefCell;
|
||||
use wasmi::{
|
||||
self, Signature, Error, FuncRef, FuncInstance, MemoryDescriptor,
|
||||
MemoryRef, MemoryInstance, memory_units,
|
||||
};
|
||||
|
||||
pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[
|
||||
Static(
|
||||
"_storage_read",
|
||||
&[I32; 2],
|
||||
/// Internal ids all functions runtime supports. This is just a glue for wasmi interpreter
|
||||
/// that lacks high-level api and later will be factored out
|
||||
pub mod ids {
|
||||
pub const STORAGE_WRITE_FUNC: usize = 0;
|
||||
pub const STORAGE_READ_FUNC: usize = 10;
|
||||
pub const RET_FUNC: usize = 20;
|
||||
pub const GAS_FUNC: usize = 30;
|
||||
pub const FETCH_INPUT_FUNC: usize = 40;
|
||||
pub const INPUT_LENGTH_FUNC: usize = 50;
|
||||
pub const CCALL_FUNC: usize = 60;
|
||||
pub const SCALL_FUNC: usize = 70;
|
||||
pub const DCALL_FUNC: usize = 80;
|
||||
pub const VALUE_FUNC: usize = 90;
|
||||
pub const CREATE_FUNC: usize = 100;
|
||||
pub const SUICIDE_FUNC: usize = 110;
|
||||
pub const BLOCKHASH_FUNC: usize = 120;
|
||||
pub const BLOCKNUMBER_FUNC: usize = 130;
|
||||
pub const COINBASE_FUNC: usize = 140;
|
||||
pub const DIFFICULTY_FUNC: usize = 150;
|
||||
pub const GASLIMIT_FUNC: usize = 160;
|
||||
pub const TIMESTAMP_FUNC: usize = 170;
|
||||
pub const ADDRESS_FUNC: usize = 180;
|
||||
pub const SENDER_FUNC: usize = 190;
|
||||
pub const ORIGIN_FUNC: usize = 200;
|
||||
pub const ELOG_FUNC: usize = 210;
|
||||
|
||||
pub const PANIC_FUNC: usize = 1000;
|
||||
pub const DEBUG_FUNC: usize = 1010;
|
||||
}
|
||||
|
||||
/// Signatures of all functions runtime supports. The actual dispatch happens at
|
||||
/// impl runtime::Runtime methods.
|
||||
pub mod signatures {
|
||||
use wasmi::{self, ValueType};
|
||||
use wasmi::ValueType::*;
|
||||
|
||||
pub struct StaticSignature(pub &'static [ValueType], pub Option<ValueType>);
|
||||
|
||||
pub const STORAGE_READ: StaticSignature = StaticSignature(
|
||||
&[I32, I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_storage_write",
|
||||
&[I32; 2],
|
||||
);
|
||||
|
||||
pub const STORAGE_WRITE: StaticSignature = StaticSignature(
|
||||
&[I32, I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_balance",
|
||||
&[I32; 2],
|
||||
);
|
||||
|
||||
pub const RET: StaticSignature = StaticSignature(
|
||||
&[I32, I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_ext_malloc",
|
||||
);
|
||||
|
||||
pub const GAS: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const FETCH_INPUT: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const INPUT_LENGTH: StaticSignature = StaticSignature(
|
||||
&[],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_ext_free",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"gas",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_debug",
|
||||
&[I32; 2],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_suicide",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_create",
|
||||
&[I32; 4],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_ccall",
|
||||
);
|
||||
|
||||
pub const CCALL: StaticSignature = StaticSignature(
|
||||
&[I64, I32, I32, I32, I32, I32, I32],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_dcall",
|
||||
);
|
||||
|
||||
pub const DCALL: StaticSignature = StaticSignature(
|
||||
&[I64, I32, I32, I32, I32, I32],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_scall",
|
||||
);
|
||||
|
||||
pub const SCALL: StaticSignature = StaticSignature(
|
||||
&[I64, I32, I32, I32, I32, I32],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"abort",
|
||||
);
|
||||
|
||||
pub const PANIC: StaticSignature = StaticSignature(
|
||||
&[I32, I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const DEBUG: StaticSignature = StaticSignature(
|
||||
&[I32, I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const VALUE: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_emscripten_memcpy_big",
|
||||
&[I32; 3],
|
||||
);
|
||||
|
||||
pub const CREATE: StaticSignature = StaticSignature(
|
||||
&[I32, I32, I32, I32],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_ext_memcpy",
|
||||
&[I32; 3],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_ext_memset",
|
||||
&[I32; 3],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_ext_memmove",
|
||||
&[I32; 3],
|
||||
Some(I32),
|
||||
),
|
||||
Static(
|
||||
"_panic",
|
||||
&[I32; 2],
|
||||
);
|
||||
|
||||
pub const SUICIDE: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_blockhash",
|
||||
);
|
||||
|
||||
pub const BLOCKHASH: StaticSignature = StaticSignature(
|
||||
&[I64, I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_coinbase",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_sender",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_origin",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_address",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_value",
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_timestamp",
|
||||
);
|
||||
|
||||
pub const BLOCKNUMBER: StaticSignature = StaticSignature(
|
||||
&[],
|
||||
Some(I64),
|
||||
),
|
||||
Static(
|
||||
"_blocknumber",
|
||||
);
|
||||
|
||||
pub const COINBASE: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const DIFFICULTY: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const GASLIMIT: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
);
|
||||
|
||||
pub const TIMESTAMP: StaticSignature = StaticSignature(
|
||||
&[],
|
||||
Some(I64),
|
||||
),
|
||||
Static(
|
||||
"_difficulty",
|
||||
);
|
||||
|
||||
pub const ADDRESS: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_gaslimit",
|
||||
);
|
||||
|
||||
pub const SENDER: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
Static(
|
||||
"_elog",
|
||||
&[I32; 4],
|
||||
);
|
||||
|
||||
pub const ORIGIN: StaticSignature = StaticSignature(
|
||||
&[I32],
|
||||
None,
|
||||
),
|
||||
);
|
||||
|
||||
// TODO: Get rid of it also somehow?
|
||||
Static(
|
||||
"_llvm_trap",
|
||||
&[I32; 0],
|
||||
None
|
||||
),
|
||||
pub const ELOG: StaticSignature = StaticSignature(
|
||||
&[I32, I32, I32, I32],
|
||||
None,
|
||||
);
|
||||
|
||||
Static(
|
||||
"_llvm_bswap_i64",
|
||||
&[I64],
|
||||
Some(I64)
|
||||
),
|
||||
];
|
||||
|
||||
pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserDefinedElements<'a, UserTrap> {
|
||||
interpreter::UserDefinedElements {
|
||||
executor: Some(runtime),
|
||||
globals: ::std::collections::HashMap::new(),
|
||||
functions: ::std::borrow::Cow::from(SIGNATURES),
|
||||
impl Into<wasmi::Signature> for StaticSignature {
|
||||
fn into(self) -> wasmi::Signature {
|
||||
wasmi::Signature::new(self.0, self.1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn host(signature: signatures::StaticSignature, idx: usize) -> FuncRef {
|
||||
FuncInstance::alloc_host(signature.into(), idx)
|
||||
}
|
||||
|
||||
/// Import resolver for wasmi
|
||||
/// Maps all functions that runtime support to the corresponding contract import
|
||||
/// entries.
|
||||
/// Also manages initial memory request from the runtime.
|
||||
#[derive(Default)]
|
||||
pub struct ImportResolver {
|
||||
max_memory: u32,
|
||||
memory: RefCell<Option<MemoryRef>>,
|
||||
}
|
||||
|
||||
impl ImportResolver {
|
||||
/// New import resolver with specifed maximum amount of inital memory (in wasm pages = 64kb)
|
||||
pub fn with_limit(max_memory: u32) -> ImportResolver {
|
||||
ImportResolver {
|
||||
max_memory: max_memory,
|
||||
memory: RefCell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns memory that was instantiated during the contract module
|
||||
/// start. If contract does not use memory at all, the dummy memory of length (0, 0)
|
||||
/// will be created instead. So this method always returns memory instance
|
||||
/// unless errored.
|
||||
pub fn memory_ref(&self) -> MemoryRef {
|
||||
{
|
||||
let mut mem_ref = self.memory.borrow_mut();
|
||||
if mem_ref.is_none() {
|
||||
*mem_ref = Some(
|
||||
MemoryInstance::alloc(
|
||||
memory_units::Pages(0),
|
||||
Some(memory_units::Pages(0)),
|
||||
).expect("Memory allocation (0, 0) should not fail; qed")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
self.memory.borrow().clone().expect("it is either existed or was created as (0, 0) above; qed")
|
||||
}
|
||||
|
||||
/// Returns memory size module initially requested
|
||||
pub fn memory_size(&self) -> Result<u32, Error> {
|
||||
Ok(self.memory_ref().current_size().0 as u32)
|
||||
}
|
||||
}
|
||||
|
||||
impl wasmi::ModuleImportResolver for ImportResolver {
|
||||
fn resolve_func(&self, field_name: &str, _signature: &Signature) -> Result<FuncRef, Error> {
|
||||
let func_ref = match field_name {
|
||||
"storage_read" => host(signatures::STORAGE_READ, ids::STORAGE_READ_FUNC),
|
||||
"storage_write" => host(signatures::STORAGE_WRITE, ids::STORAGE_WRITE_FUNC),
|
||||
"ret" => host(signatures::RET, ids::RET_FUNC),
|
||||
"gas" => host(signatures::GAS, ids::GAS_FUNC),
|
||||
"input_length" => host(signatures::INPUT_LENGTH, ids::INPUT_LENGTH_FUNC),
|
||||
"fetch_input" => host(signatures::FETCH_INPUT, ids::FETCH_INPUT_FUNC),
|
||||
"panic" => host(signatures::PANIC, ids::PANIC_FUNC),
|
||||
"debug" => host(signatures::DEBUG, ids::DEBUG_FUNC),
|
||||
"ccall" => host(signatures::CCALL, ids::CCALL_FUNC),
|
||||
"dcall" => host(signatures::DCALL, ids::DCALL_FUNC),
|
||||
"scall" => host(signatures::SCALL, ids::SCALL_FUNC),
|
||||
"value" => host(signatures::VALUE, ids::VALUE_FUNC),
|
||||
"create" => host(signatures::CREATE, ids::CREATE_FUNC),
|
||||
"suicide" => host(signatures::SUICIDE, ids::SUICIDE_FUNC),
|
||||
"blockhash" => host(signatures::BLOCKHASH, ids::BLOCKHASH_FUNC),
|
||||
"blocknumber" => host(signatures::BLOCKNUMBER, ids::BLOCKNUMBER_FUNC),
|
||||
"coinbase" => host(signatures::COINBASE, ids::COINBASE_FUNC),
|
||||
"difficulty" => host(signatures::DIFFICULTY, ids::DIFFICULTY_FUNC),
|
||||
"gaslimit" => host(signatures::GASLIMIT, ids::GASLIMIT_FUNC),
|
||||
"timestamp" => host(signatures::TIMESTAMP, ids::TIMESTAMP_FUNC),
|
||||
"address" => host(signatures::ADDRESS, ids::ADDRESS_FUNC),
|
||||
"sender" => host(signatures::SENDER, ids::SENDER_FUNC),
|
||||
"origin" => host(signatures::ORIGIN, ids::ORIGIN_FUNC),
|
||||
"elog" => host(signatures::ELOG, ids::ELOG_FUNC),
|
||||
_ => {
|
||||
return Err(wasmi::Error::Instantiation(
|
||||
format!("Export {} not found", field_name),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
Ok(func_ref)
|
||||
}
|
||||
|
||||
fn resolve_memory(
|
||||
&self,
|
||||
field_name: &str,
|
||||
descriptor: &MemoryDescriptor,
|
||||
) -> Result<MemoryRef, Error> {
|
||||
if field_name == "memory" {
|
||||
let effective_max = descriptor.maximum().unwrap_or(self.max_memory + 1);
|
||||
if descriptor.initial() > self.max_memory || effective_max > self.max_memory
|
||||
{
|
||||
Err(Error::Instantiation("Module requested too much memory".to_owned()))
|
||||
} else {
|
||||
let mem = MemoryInstance::alloc(
|
||||
memory_units::Pages(descriptor.initial() as usize),
|
||||
descriptor.maximum().map(|x| memory_units::Pages(x as usize)),
|
||||
)?;
|
||||
*self.memory.borrow_mut() = Some(mem.clone());
|
||||
Ok(mem)
|
||||
}
|
||||
} else {
|
||||
Err(Error::Instantiation("Memory imported under unknown name".to_owned()))
|
||||
}
|
||||
}
|
||||
}
|
@ -16,184 +16,165 @@
|
||||
|
||||
//! Wasm Interpreter
|
||||
|
||||
extern crate vm;
|
||||
extern crate byteorder;
|
||||
extern crate ethcore_logger;
|
||||
extern crate ethcore_util as util;
|
||||
extern crate ethcore_bigint as bigint;
|
||||
#[macro_use] extern crate log;
|
||||
extern crate ethcore_logger;
|
||||
extern crate byteorder;
|
||||
extern crate libc;
|
||||
extern crate parity_wasm;
|
||||
extern crate wasm_utils;
|
||||
extern crate vm;
|
||||
extern crate pwasm_utils as wasm_utils;
|
||||
extern crate wasmi;
|
||||
|
||||
mod runtime;
|
||||
mod ptr;
|
||||
mod result;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
mod env;
|
||||
mod panic_payload;
|
||||
|
||||
const DEFAULT_STACK_SPACE: u32 = 5 * 1024 * 1024;
|
||||
|
||||
use parity_wasm::{interpreter, elements};
|
||||
use parity_wasm::interpreter::ModuleInstanceInterface;
|
||||
mod parser;
|
||||
|
||||
use vm::{GasLeft, ReturnData, ActionParams};
|
||||
use self::runtime::{Runtime, RuntimeContext, UserTrap};
|
||||
use wasmi::{Error as InterpreterError, Trap};
|
||||
|
||||
pub use self::runtime::InterpreterError;
|
||||
use runtime::{Runtime, RuntimeContext};
|
||||
|
||||
const DEFAULT_RESULT_BUFFER: usize = 1024;
|
||||
use bigint::uint::U256;
|
||||
|
||||
/// Wrapped interpreter error
|
||||
#[derive(Debug)]
|
||||
pub struct Error(InterpreterError);
|
||||
pub enum Error {
|
||||
Interpreter(InterpreterError),
|
||||
Trap(Trap),
|
||||
}
|
||||
|
||||
impl From<InterpreterError> for Error {
|
||||
fn from(e: InterpreterError) -> Self {
|
||||
Error(e)
|
||||
Error::Interpreter(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Trap> for Error {
|
||||
fn from(e: Trap) -> Self {
|
||||
Error::Trap(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Error> for vm::Error {
|
||||
fn from(e: Error) -> Self {
|
||||
vm::Error::Wasm(format!("Wasm runtime error: {:?}", e.0))
|
||||
match e {
|
||||
Error::Interpreter(e) => vm::Error::Wasm(format!("Wasm runtime error: {:?}", e)),
|
||||
Error::Trap(e) => vm::Error::Wasm(format!("Wasm contract trap: {:?}", e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<UserTrap> for vm::Error {
|
||||
fn from(e: UserTrap) -> Self { e.into() }
|
||||
}
|
||||
|
||||
/// Wasm interpreter instance
|
||||
pub struct WasmInterpreter {
|
||||
program: runtime::InterpreterProgramInstance,
|
||||
result: Vec<u8>,
|
||||
pub struct WasmInterpreter;
|
||||
|
||||
impl From<runtime::Error> for vm::Error {
|
||||
fn from(e: runtime::Error) -> Self {
|
||||
vm::Error::Wasm(format!("Wasm runtime error: {:?}", e))
|
||||
}
|
||||
}
|
||||
|
||||
impl WasmInterpreter {
|
||||
/// New wasm interpreter instance
|
||||
pub fn new() -> Result<WasmInterpreter, Error> {
|
||||
Ok(WasmInterpreter {
|
||||
program: interpreter::ProgramInstance::new()?,
|
||||
result: Vec::with_capacity(DEFAULT_RESULT_BUFFER),
|
||||
})
|
||||
}
|
||||
enum ExecutionOutcome {
|
||||
Suicide,
|
||||
Return,
|
||||
NotSpecial,
|
||||
}
|
||||
|
||||
impl vm::Vm for WasmInterpreter {
|
||||
|
||||
fn exec(&mut self, params: ActionParams, ext: &mut vm::Ext) -> vm::Result<GasLeft> {
|
||||
use parity_wasm::elements::Deserialize;
|
||||
let (module, data) = parser::payload(¶ms, ext.schedule().wasm())?;
|
||||
|
||||
let code = params.code.expect("exec is only called on contract with code; qed");
|
||||
let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?;
|
||||
|
||||
trace!(target: "wasm", "Started wasm interpreter with code.len={:?}", code.len());
|
||||
let instantiation_resolver = env::ImportResolver::with_limit(16);
|
||||
|
||||
let env_instance = self.program.module("env")
|
||||
// prefer explicit panic here
|
||||
.expect("Wasm program to contain env module");
|
||||
let module_instance = wasmi::ModuleInstance::new(
|
||||
&loaded_module,
|
||||
&wasmi::ImportsBuilder::new().with_resolver("env", &instantiation_resolver)
|
||||
).map_err(Error::Interpreter)?;
|
||||
|
||||
let env_memory = env_instance.memory(interpreter::ItemIndex::Internal(0))
|
||||
// prefer explicit panic here
|
||||
.expect("Linear memory to exist in wasm runtime");
|
||||
|
||||
if params.gas > ::std::u64::MAX.into() {
|
||||
return Err(vm::Error::Wasm("Wasm interpreter cannot run contracts with gas >= 2^64".to_owned()));
|
||||
}
|
||||
|
||||
let mut runtime = Runtime::with_params(
|
||||
ext,
|
||||
env_memory,
|
||||
DEFAULT_STACK_SPACE,
|
||||
params.gas.low_u64(),
|
||||
RuntimeContext {
|
||||
address: params.address,
|
||||
sender: params.sender,
|
||||
origin: params.origin,
|
||||
code_address: params.code_address,
|
||||
value: params.value.value(),
|
||||
},
|
||||
&self.program,
|
||||
);
|
||||
|
||||
let (mut cursor, data_position) = match params.params_type {
|
||||
vm::ParamsType::Embedded => {
|
||||
let module_size = parity_wasm::peek_size(&*code);
|
||||
(
|
||||
::std::io::Cursor::new(&code[..module_size]),
|
||||
module_size
|
||||
)
|
||||
},
|
||||
vm::ParamsType::Separate => {
|
||||
(::std::io::Cursor::new(&code[..]), 0)
|
||||
},
|
||||
};
|
||||
|
||||
let contract_module = wasm_utils::inject_gas_counter(
|
||||
elements::Module::deserialize(
|
||||
&mut cursor
|
||||
).map_err(|err| {
|
||||
vm::Error::Wasm(format!("Error deserializing contract code ({:?})", err))
|
||||
})?,
|
||||
runtime.gas_rules(),
|
||||
);
|
||||
|
||||
let data_section_length = contract_module.data_section()
|
||||
.map(|section| section.entries().iter().fold(0, |sum, entry| sum + entry.value().len()))
|
||||
.unwrap_or(0)
|
||||
as u64;
|
||||
|
||||
let static_segment_cost = data_section_length * runtime.ext().schedule().wasm.static_region as u64;
|
||||
runtime.charge(|_| static_segment_cost).map_err(Error)?;
|
||||
|
||||
let d_ptr = {
|
||||
match params.params_type {
|
||||
vm::ParamsType::Embedded => {
|
||||
runtime.write_descriptor(
|
||||
if data_position < code.len() { &code[data_position..] } else { &[] }
|
||||
).map_err(Error)?
|
||||
},
|
||||
vm::ParamsType::Separate => {
|
||||
runtime.write_descriptor(¶ms.data.unwrap_or_default())
|
||||
.map_err(Error)?
|
||||
}
|
||||
}
|
||||
};
|
||||
let adjusted_gas = params.gas * U256::from(ext.schedule().wasm().opcodes_div) /
|
||||
U256::from(ext.schedule().wasm().opcodes_mul);
|
||||
|
||||
if adjusted_gas > ::std::u64::MAX.into()
|
||||
{
|
||||
let execution_params = runtime.execution_params()
|
||||
.add_argument(interpreter::RuntimeValue::I32(d_ptr.as_raw() as i32));
|
||||
|
||||
let module_instance = self.program.add_module("contract", contract_module, Some(&execution_params.externals))
|
||||
.map_err(|err| {
|
||||
trace!(target: "wasm", "Error adding contract module: {:?}", err);
|
||||
vm::Error::from(Error(err))
|
||||
})?;
|
||||
|
||||
match module_instance.execute_export("_call", execution_params) {
|
||||
Ok(_) => { },
|
||||
Err(interpreter::Error::User(UserTrap::Suicide)) => { },
|
||||
Err(err) => {
|
||||
trace!(target: "wasm", "Error executing contract: {:?}", err);
|
||||
return Err(vm::Error::from(Error(err)))
|
||||
}
|
||||
}
|
||||
return Err(vm::Error::Wasm("Wasm interpreter cannot run contracts with gas (wasm adjusted) >= 2^64".to_owned()));
|
||||
}
|
||||
|
||||
let result = result::WasmResult::new(d_ptr);
|
||||
if result.peek_empty(&*runtime.memory()).map_err(|e| Error(e))? {
|
||||
let initial_memory = instantiation_resolver.memory_size().map_err(Error::Interpreter)?;
|
||||
trace!(target: "wasm", "Contract requested {:?} pages of initial memory", initial_memory);
|
||||
|
||||
let (gas_left, result) = {
|
||||
let mut runtime = Runtime::with_params(
|
||||
ext,
|
||||
instantiation_resolver.memory_ref(),
|
||||
// cannot overflow, checked above
|
||||
adjusted_gas.low_u64(),
|
||||
data.to_vec(),
|
||||
RuntimeContext {
|
||||
address: params.address,
|
||||
sender: params.sender,
|
||||
origin: params.origin,
|
||||
code_address: params.code_address,
|
||||
value: params.value.value(),
|
||||
},
|
||||
);
|
||||
|
||||
// cannot overflow if static_region < 2^16,
|
||||
// initial_memory ∈ [0..2^32)
|
||||
// total_charge <- static_region * 2^32 * 2^16
|
||||
// total_charge ∈ [0..2^64) if static_region ∈ [0..2^16)
|
||||
// qed
|
||||
assert!(runtime.schedule().wasm().initial_mem < 1 << 16);
|
||||
runtime.charge(|s| initial_memory as u64 * s.wasm().initial_mem as u64)?;
|
||||
|
||||
let module_instance = module_instance.run_start(&mut runtime).map_err(Error::Trap)?;
|
||||
|
||||
let invoke_result = module_instance.invoke_export("call", &[], &mut runtime);
|
||||
|
||||
let mut execution_outcome = ExecutionOutcome::NotSpecial;
|
||||
if let Err(InterpreterError::Trap(ref trap)) = invoke_result {
|
||||
if let wasmi::TrapKind::Host(ref boxed) = *trap.kind() {
|
||||
let ref runtime_err = boxed.downcast_ref::<runtime::Error>()
|
||||
.expect("Host errors other than runtime::Error never produced; qed");
|
||||
|
||||
match **runtime_err {
|
||||
runtime::Error::Suicide => { execution_outcome = ExecutionOutcome::Suicide; },
|
||||
runtime::Error::Return => { execution_outcome = ExecutionOutcome::Return; },
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let (ExecutionOutcome::NotSpecial, Err(e)) = (execution_outcome, invoke_result) {
|
||||
trace!(target: "wasm", "Error executing contract: {:?}", e);
|
||||
return Err(vm::Error::from(Error::from(e)));
|
||||
}
|
||||
|
||||
(
|
||||
runtime.gas_left().expect("Cannot fail since it was not updated since last charge"),
|
||||
runtime.into_result(),
|
||||
)
|
||||
};
|
||||
|
||||
let gas_left =
|
||||
U256::from(gas_left) * U256::from(ext.schedule().wasm().opcodes_mul)
|
||||
/ U256::from(ext.schedule().wasm().opcodes_div);
|
||||
|
||||
if result.is_empty() {
|
||||
trace!(target: "wasm", "Contract execution result is empty.");
|
||||
Ok(GasLeft::Known(runtime.gas_left()?.into()))
|
||||
Ok(GasLeft::Known(gas_left))
|
||||
} else {
|
||||
self.result.clear();
|
||||
// todo: use memory views to avoid copy
|
||||
self.result.extend(result.pop(&*runtime.memory()).map_err(|e| Error(e.into()))?);
|
||||
let len = self.result.len();
|
||||
let len = result.len();
|
||||
Ok(GasLeft::NeedsReturn {
|
||||
gas_left: runtime.gas_left().map_err(|e| Error(e.into()))?.into(),
|
||||
gas_left: gas_left,
|
||||
data: ReturnData::new(
|
||||
::std::mem::replace(&mut self.result, Vec::with_capacity(DEFAULT_RESULT_BUFFER)),
|
||||
result,
|
||||
0,
|
||||
len,
|
||||
),
|
||||
|
ethcore/wasm/src/parser.rs (new file, 98 lines)
@ -0,0 +1,98 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! ActionParams parser for wasm

use vm;
use wasm_utils::{self, rules};
use parity_wasm::elements::{self, Deserialize};
use parity_wasm::peek_size;

fn gas_rules(wasm_costs: &vm::WasmCosts) -> rules::Set {
	rules::Set::new(
		wasm_costs.regular,
		{
			let mut vals = ::std::collections::HashMap::with_capacity(8);
			vals.insert(rules::InstructionType::Load, rules::Metering::Fixed(wasm_costs.mem as u32));
			vals.insert(rules::InstructionType::Store, rules::Metering::Fixed(wasm_costs.mem as u32));
			vals.insert(rules::InstructionType::Div, rules::Metering::Fixed(wasm_costs.div as u32));
			vals.insert(rules::InstructionType::Mul, rules::Metering::Fixed(wasm_costs.mul as u32));
			vals
		})
		.with_grow_cost(wasm_costs.grow_mem)
		.with_forbidden_floats()
}

/// Splits payload to code and data according to params.params_type, also
/// loads the module instance from payload and injects gas counter according
/// to schedule.
pub fn payload<'a>(params: &'a vm::ActionParams, wasm_costs: &vm::WasmCosts)
	-> Result<(elements::Module, &'a [u8]), vm::Error>
{
	let code = match params.code {
		Some(ref code) => &code[..],
		None => { return Err(vm::Error::Wasm("Invalid wasm call".to_owned())); }
	};

	let (mut cursor, data_position) = match params.params_type {
		vm::ParamsType::Embedded => {
			let module_size = peek_size(&*code);
			(
				::std::io::Cursor::new(&code[..module_size]),
				module_size
			)
		},
		vm::ParamsType::Separate => {
			(::std::io::Cursor::new(&code[..]), 0)
		},
	};

	let deserialized_module = elements::Module::deserialize(
		&mut cursor
	).map_err(|err| {
		vm::Error::Wasm(format!("Error deserializing contract code ({:?})", err))
	})?;

	if deserialized_module.memory_section().map_or(false, |ms| ms.entries().len() > 0) {
		// According to WebAssembly spec, internal memory is hidden from embedder and should not
		// be interacted with. So we disable this kind of modules at decoding level.
		return Err(vm::Error::Wasm(format!("Malformed wasm module: internal memory")));
	}

	let contract_module = wasm_utils::inject_gas_counter(
		deserialized_module,
		&gas_rules(wasm_costs),
	).map_err(|_| vm::Error::Wasm(format!("Wasm contract error: bytecode invalid")))?;

	let contract_module = wasm_utils::stack_height::inject_limiter(
		contract_module,
		wasm_costs.max_stack_height,
	).map_err(|_| vm::Error::Wasm(format!("Wasm contract error: stack limiter failure")))?;

	let data = match params.params_type {
		vm::ParamsType::Embedded => {
			if data_position < code.len() { &code[data_position..] } else { &[] }
		},
		vm::ParamsType::Separate => {
			match params.data {
				Some(ref s) => &s[..],
				None => &[]
			}
		}
	};

	Ok((contract_module, data))
}
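`payload` splits a single input buffer into module bytes and call data when `ParamsType::Embedded` is used: `peek_size` finds where the serialized module ends and everything after it is treated as data, while `ParamsType::Separate` takes data from `params.data`. A standalone sketch of just the splitting rule, using an explicit module length in place of `peek_size`:

enum ParamsType { Embedded, Separate }

// Split `code` into (module bytes, call data). `module_size` plays the role
// parity_wasm::peek_size plays in the real parser.
fn split_payload<'a>(
    params_type: &ParamsType,
    code: &'a [u8],
    module_size: usize,
    separate_data: &'a [u8],
) -> (&'a [u8], &'a [u8]) {
    match params_type {
        ParamsType::Embedded => {
            let module = &code[..module_size.min(code.len())];
            let data = if module_size < code.len() { &code[module_size..] } else { &[] };
            (module, data)
        }
        ParamsType::Separate => (code, separate_data),
    }
}

fn main() {
    let blob: [u8; 6] = [0xde, 0xad, 0xbe, 0xef, 0x01, 0x02];
    let extra = [0xaa_u8];

    // Pretend the serialized module occupies the first four bytes of the payload.
    let (module, data) = split_payload(&ParamsType::Embedded, &blob, 4, &extra);
    assert_eq!(module.len(), 4);
    assert_eq!(data, &blob[4..]);

    // With separate params the whole buffer is code and data comes from elsewhere.
    let (module2, data2) = split_payload(&ParamsType::Separate, &blob, 0, &extra);
    assert_eq!(module2, &blob[..]);
    assert_eq!(data2, &extra[..]);
}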
@ -1,58 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Wasm bound-checked ptr

use super::runtime::{InterpreterMemoryInstance, InterpreterError, UserTrap};

/// Bound-checked wrapper for webassembly memory
pub struct WasmPtr(u32);

/// Error in bound check
#[derive(Debug)]
pub enum Error {
	AccessViolation,
}

impl From<u32> for WasmPtr {
	fn from(raw: u32) -> Self {
		WasmPtr(raw)
	}
}

impl From<Error> for InterpreterError {
	fn from(_e: Error) -> Self {
		UserTrap::MemoryAccessViolation.into()
	}
}

impl WasmPtr {
	// todo: use memory view when they are on
	/// Check memory range and return data with given length starting from the current pointer value
	pub fn slice(&self, len: u32, mem: &InterpreterMemoryInstance) -> Result<Vec<u8>, Error> {
		mem.get(self.0, len as usize).map_err(|_| Error::AccessViolation)
	}

	// todo: maybe 2gb limit can be enhanced
	/// Convert i32 from wasm stack to the wrapped pointer
	pub fn from_i32(raw_ptr: i32) -> Result<Self, Error> {
		if raw_ptr < 0 { return Err(Error::AccessViolation); }
		Ok(WasmPtr(raw_ptr as u32))
	}

	/// Return pointer raw value
	pub fn as_raw(&self) -> u32 { self.0 }
}
Some files were not shown because too many files have changed in this diff.