commit 93230dd4c2
Merge branch 'master' into rotating-key

Conflicts:
	ethcore/src/account_provider/mod.rs
	rpc/src/v1/types/mod.rs.in
@@ -20,7 +20,7 @@ linux-stable:
    - stable
    - triggers
  script:
-    - cargo build --release $CARGOFLAGS
+    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
    - md5sum target/release/parity > parity.md5
    - sh scripts/deb-build.sh amd64
@@ -52,7 +52,7 @@ linux-beta:
    - stable
    - triggers
  script:
-    - cargo build --release $CARGOFLAGS
+    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
  tags:
    - rust
@@ -71,7 +71,7 @@ linux-nightly:
    - stable
    - triggers
  script:
-    - cargo build --release $CARGOFLAGS
+    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
  tags:
    - rust
@@ -92,7 +92,7 @@ linux-centos:
  script:
    - export CXX="g++"
    - export CC="gcc"
-    - cargo build --release $CARGOFLAGS
+    - cargo build -j $(nproc) --release $CARGOFLAGS
    - strip target/release/parity
    - md5sum target/release/parity > parity.md5
    - aws configure set aws_access_key_id $s3_key
@@ -119,7 +119,7 @@ linux-i686:
  script:
    - export HOST_CC=gcc
    - export HOST_CXX=g++
-    - cargo build --target i686-unknown-linux-gnu --release $CARGOFLAGS
+    - cargo build -j $(nproc) --target i686-unknown-linux-gnu --release $CARGOFLAGS
    - strip target/i686-unknown-linux-gnu/release/parity
    - md5sum target/i686-unknown-linux-gnu/release/parity > parity.md5
    - sh scripts/deb-build.sh i386
@@ -161,7 +161,7 @@ linux-armv7:
    - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
    - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
    - cat .cargo/config
-    - cargo build --target armv7-unknown-linux-gnueabihf --release $CARGOFLAGS
+    - cargo build -j $(nproc) --target armv7-unknown-linux-gnueabihf --release $CARGOFLAGS
    - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
    - md5sum target/armv7-unknown-linux-gnueabihf/release/parity > parity.md5
    - sh scripts/deb-build.sh armhf
@@ -203,7 +203,7 @@ linux-arm:
    - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
    - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
    - cat .cargo/config
-    - cargo build --target arm-unknown-linux-gnueabihf --release $CARGOFLAGS
+    - cargo build -j $(nproc) --target arm-unknown-linux-gnueabihf --release $CARGOFLAGS
    - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
    - md5sum target/arm-unknown-linux-gnueabihf/release/parity > parity.md5
    - sh scripts/deb-build.sh armhf
@@ -245,7 +245,7 @@ linux-armv6:
    - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
    - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config
    - cat .cargo/config
-    - cargo build --target arm-unknown-linux-gnueabi --release $CARGOFLAGS
+    - cargo build -j $(nproc) --target arm-unknown-linux-gnueabi --release $CARGOFLAGS
    - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
    - md5sum target/arm-unknown-linux-gnueabi/release/parity > parity.md5
    - aws configure set aws_access_key_id $s3_key
@@ -280,7 +280,7 @@ linux-aarch64:
    - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
    - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config
    - cat .cargo/config
-    - cargo build --target aarch64-unknown-linux-gnu --release $CARGOFLAGS
+    - cargo build -j $(nproc) --target aarch64-unknown-linux-gnu --release $CARGOFLAGS
    - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
    - md5sum target/aarch64-unknown-linux-gnu/release/parity > parity.md5
    - sh scripts/deb-build.sh arm64
@@ -312,8 +312,8 @@ darwin:
    - stable
    - triggers
  script:
-    - cargo build --release -p ethstore $CARGOFLAGS
+    - cargo build -j 8 --release -p ethstore #$CARGOFLAGS
-    - cargo build --release $CARGOFLAGS
+    - cargo build -j 8 --release #$CARGOFLAGS
    - rm -rf parity.md5
    - md5sum target/release/parity > parity.md5
    - packagesbuild -v mac/Parity.pkgproj
@@ -350,7 +350,7 @@ windows:
    - set RUST_BACKTRACE=1
    - set RUSTFLAGS=%RUSTFLAGS%
    - rustup default stable-x86_64-pc-windows-msvc
-    - cargo build --release %CARGOFLAGS%
+    - cargo build -j 8 --release #%CARGOFLAGS%
    - curl -sL --url "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -o nsis\SimpleFC.dll
    - curl -sL --url "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -o nsis\vc_redist.x64.exe
    - signtool sign /f %keyfile% /p %certpass% target\release\parity.exe
@@ -401,7 +401,7 @@ test-darwin:
    - git submodule update --init --recursive
  script:
    - export RUST_BACKTRACE=1
-    - ./test.sh $CARGOFLAGS --no-release
+    - ./test.sh $CARGOFLAGS
  tags:
    - osx
  allow_failure: true
@@ -413,7 +413,7 @@ test-windows:
    - git submodule update --init --recursive
  script:
    - set RUST_BACKTRACE=1
-    - cargo test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p ethcore-dapps -p ethcore-rpc -p ethcore-signer -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity %CARGOFLAGS% --verbose --release
+    - cargo -j 8 test --features json-tests -p rlp -p ethash -p ethcore -p ethcore-bigint -p ethcore-dapps -p ethcore-rpc -p ethcore-signer -p ethcore-util -p ethcore-network -p ethcore-io -p ethkey -p ethstore -p ethsync -p ethcore-ipc -p ethcore-ipc-tests -p ethcore-ipc-nano -p parity %CARGOFLAGS% --verbose --release
  tags:
    - rust-windows
  allow_failure: true
@@ -428,7 +428,7 @@ test-rust-stable:
  script:
    - export RUST_BACKTRACE=1
    - echo $JS_FILES_MODIFIED
-    - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "skip js test"&./test.sh $CARGOFLAGS --no-release; else echo "skip rust test"&./js/scripts/lint.sh&./js/scripts/test.sh&./js/scripts/build.sh; fi
+    - if [ "$JS_FILES_MODIFIED" = 0 ]; then echo "skip js test"&./test.sh $CARGOFLAGS; else echo "skip rust test"&./js/scripts/lint.sh&./js/scripts/test.sh&./js/scripts/build.sh; fi
  tags:
    - rust
    - rust-stable
@@ -457,7 +457,7 @@ test-rust-beta:
  script:
    - export RUST_BACKTRACE=1
    - echo $JS_FILES_MODIFIED
-    - ./test.sh $CARGOFLAGS --no-release
+    - ./test.sh $CARGOFLAGS
  tags:
    - rust
    - rust-beta
@@ -471,7 +471,7 @@ test-rust-nightly:
    - git submodule update --init --recursive
  script:
    - export RUST_BACKTRACE=1
-    - ./test.sh $CARGOFLAGS --no-release
+    - ./test.sh $CARGOFLAGS
  tags:
    - rust
    - rust-nightly
@@ -16,7 +16,7 @@ git:
matrix:
  include:
    - rust: stable
-      env: RUN_TESTS="true" TEST_OPTIONS="--no-release"
+      env: RUN_TESTS="true" TEST_OPTIONS=""
    - rust: stable
      env: RUN_COVERAGE="true"
    - rust: stable
@@ -71,8 +71,7 @@ install:
script:
  - if [ "$RUN_TESTS" = "true" ]; then
      ./js/scripts/lint.sh &&
-      ./js/scripts/test.sh &&
-      ./test.sh $TEST_OPTIONS --verbose;
+      travis_wait 40 ./test.sh $TEST_OPTIONS;
    fi
  - if [ "$RUN_COVERAGE" = "true" ]; then ./scripts/cov.sh "$KCOV_CMD"; fi
Cargo.lock (generated, 253 changed lines)
@@ -10,24 +10,23 @@ dependencies = [
 "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore 1.5.0",
 "ethcore-dapps 1.5.0",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-hash-fetch 1.5.0",
 "ethcore-io 1.5.0",
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
- "ethcore-ipc-codegen 1.4.0",
+ "ethcore-ipc-codegen 1.5.0",
 "ethcore-ipc-hypervisor 1.2.0",
- "ethcore-ipc-nano 1.4.0",
+ "ethcore-ipc-nano 1.5.0",
 "ethcore-ipc-tests 0.1.0",
 "ethcore-logger 1.5.0",
 "ethcore-rpc 1.5.0",
 "ethcore-signer 1.5.0",
- "ethcore-stratum 1.4.0",
+ "ethcore-stratum 1.5.0",
 "ethcore-util 1.5.0",
 "ethsync 1.5.0",
- "fdlimit 0.1.0",
+ "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "hyper 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)",
 "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)",
 "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -175,6 +174,15 @@ dependencies = [
 "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

+[[package]]
+name = "cookie"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "crossbeam"
version = "0.2.9"
@@ -265,7 +273,7 @@ dependencies = [

[[package]]
name = "ethash"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -283,20 +291,20 @@ dependencies = [
 "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
 "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethash 1.4.0",
+ "ethash 1.5.0",
 "ethcore-bloom-journal 0.1.0",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-io 1.5.0",
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
- "ethcore-ipc-codegen 1.4.0",
+ "ethcore-ipc-codegen 1.5.0",
- "ethcore-ipc-nano 1.4.0",
+ "ethcore-ipc-nano 1.5.0",
 "ethcore-util 1.5.0",
 "ethjson 0.1.0",
 "ethkey 0.2.0",
 "ethstore 0.1.0",
- "evmjit 1.4.0",
+ "evmjit 1.5.0",
 "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
+ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)",
 "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -335,20 +343,20 @@ version = "1.5.0"
dependencies = [
 "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-hash-fetch 1.5.0",
 "ethcore-rpc 1.5.0",
 "ethcore-util 1.5.0",
 "fetch 0.1.0",
- "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
+ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
- "jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
+ "jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc.git)",
 "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-ui 1.4.0",
+ "parity-ui 1.5.0",
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -362,7 +370,7 @@ dependencies = [

[[package]]
name = "ethcore-devtools"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -392,9 +400,9 @@ dependencies = [

[[package]]
name = "ethcore-ipc"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-util 1.5.0",
 "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
 "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -402,7 +410,7 @@ dependencies = [

[[package]]
name = "ethcore-ipc-codegen"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
 "aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "quasi 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -415,9 +423,9 @@ dependencies = [
name = "ethcore-ipc-hypervisor"
version = "1.2.0"
dependencies = [
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
- "ethcore-ipc-codegen 1.4.0",
+ "ethcore-ipc-codegen 1.5.0",
- "ethcore-ipc-nano 1.4.0",
+ "ethcore-ipc-nano 1.5.0",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
 "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -426,9 +434,9 @@ dependencies = [

[[package]]
name = "ethcore-ipc-nano"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
 "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
@@ -438,10 +446,10 @@ dependencies = [
name = "ethcore-ipc-tests"
version = "0.1.0"
dependencies = [
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
- "ethcore-ipc-codegen 1.4.0",
+ "ethcore-ipc-codegen 1.5.0",
- "ethcore-ipc-nano 1.4.0",
+ "ethcore-ipc-nano 1.5.0",
 "ethcore-util 1.5.0",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
@@ -467,7 +475,7 @@ version = "1.5.0"
dependencies = [
 "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-io 1.5.0",
 "ethcore-util 1.5.0",
 "ethcrypto 0.1.0",
@@ -491,11 +499,11 @@ name = "ethcore-rpc"
version = "1.5.0"
dependencies = [
 "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethash 1.4.0",
+ "ethash 1.5.0",
 "ethcore 1.5.0",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-io 1.5.0",
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
 "ethcore-util 1.5.0",
 "ethcrypto 0.1.0",
 "ethjson 0.1.0",
@@ -503,9 +511,9 @@ dependencies = [
 "ethstore 0.1.0",
 "ethsync 1.5.0",
 "fetch 0.1.0",
- "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc.git)",
- "jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
+ "jsonrpc-ipc-server 0.2.4 (git+https://github.com/ethcore/jsonrpc.git)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "rlp 0.1.0",
 "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -522,14 +530,14 @@ version = "1.5.0"
dependencies = [
 "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "ethcore-io 1.5.0",
 "ethcore-rpc 1.5.0",
 "ethcore-util 1.5.0",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-ui 1.4.0",
+ "parity-ui 1.5.0",
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)",
@@ -537,16 +545,16 @@ dependencies = [

[[package]]
name = "ethcore-stratum"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
 "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
- "ethcore-ipc-codegen 1.4.0",
+ "ethcore-ipc-codegen 1.5.0",
- "ethcore-ipc-nano 1.4.0",
+ "ethcore-ipc-nano 1.5.0",
 "ethcore-util 1.5.0",
- "json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-tcp-server 0.1.0 (git+https://github.com/ethcore/jsonrpc.git)",
 "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
@@ -565,7 +573,7 @@ dependencies = [
 "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
 "ethcore-bigint 0.1.2",
 "ethcore-bloom-journal 0.1.0",
- "ethcore-devtools 1.4.0",
+ "ethcore-devtools 1.5.0",
 "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
 "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -653,9 +661,9 @@ dependencies = [
 "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore 1.5.0",
 "ethcore-io 1.5.0",
- "ethcore-ipc 1.4.0",
+ "ethcore-ipc 1.5.0",
- "ethcore-ipc-codegen 1.4.0",
+ "ethcore-ipc-codegen 1.5.0",
- "ethcore-ipc-nano 1.4.0",
+ "ethcore-ipc-nano 1.5.0",
 "ethcore-network 1.5.0",
 "ethcore-util 1.5.0",
 "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -669,14 +677,15 @@ dependencies = [

[[package]]
name = "evmjit"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
 "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "fdlimit"
-version = "0.1.0"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -686,7 +695,7 @@ name = "fetch"
version = "0.1.0"
dependencies = [
 "https-fetch 0.1.0",
- "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
+ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -748,27 +757,6 @@ dependencies = [
 "rustls 0.1.2 (git+https://github.com/ctz/rustls)",
]

-[[package]]
-name = "hyper"
-version = "0.9.4"
-source = "git+https://github.com/ethcore/hyper#9e346c1d4bc30cd4142dea9d8a0b117d30858ca4"
-dependencies = [
- "cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rotor 0.6.3 (git+https://github.com/ethcore/rotor)",
- "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "hyper"
version = "0.9.10"
@@ -789,6 +777,25 @@ dependencies = [
 "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

+[[package]]
+name = "hyper"
+version = "0.10.0-a.0"
+source = "git+https://github.com/ethcore/hyper#7d4f7fa0baddcb2b0c523f7c05855d67de94fe88"
+dependencies = [
+ "cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rotor 0.6.3 (git+https://github.com/ethcore/rotor)",
+ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "spmc 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "idna"
version = "0.1.0"
@@ -831,39 +838,10 @@ name = "itoa"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"

-[[package]]
-name = "json-ipc-server"
-version = "0.2.4"
-source = "git+https://github.com/ethcore/json-ipc-server.git#4642cd03ec1d23db89df80d22d5a88e7364ab885"
-dependencies = [
- "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "json-tcp-server"
-version = "0.1.0"
-source = "git+https://github.com/ethcore/json-tcp-server#c2858522274ae56042472bb5d22845a1b85e5338"
-dependencies = [
- "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
[[package]]
name = "jsonrpc-core"
-version = "3.0.2"
+version = "4.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
+source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53"
dependencies = [
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "parking_lot 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -875,14 +853,44 @@ dependencies = [
[[package]]
name = "jsonrpc-http-server"
version = "6.1.1"
-source = "git+https://github.com/ethcore/jsonrpc-http-server.git#cd6d4cb37d672cc3057aecd0692876f9e85f3ba5"
+source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53"
dependencies = [
- "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
+ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)",
- "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

+[[package]]
+name = "jsonrpc-ipc-server"
+version = "0.2.4"
+source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53"
+dependencies = [
+ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
+ "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "miow 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "jsonrpc-tcp-server"
+version = "0.1.0"
+source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53"
+dependencies = [
+ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)",
+ "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "kernel32-sys"
version = "0.2.2"
@@ -1246,7 +1254,7 @@ dependencies = [

[[package]]
name = "parity-ui"
-version = "1.4.0"
+version = "1.5.0"
dependencies = [
 "parity-ui-dev 1.4.0",
 "parity-ui-precompiled 1.4.0 (git+https://github.com/ethcore/js-precompiled.git)",
@@ -1263,7 +1271,7 @@ dependencies = [
[[package]]
name = "parity-ui-precompiled"
version = "1.4.0"
-source = "git+https://github.com/ethcore/js-precompiled.git#cb6836dddf8c9951e056283dcd9e105e97923d07"
+source = "git+https://github.com/ethcore/js-precompiled.git#1bf7160f6c8f25353d790dbd0935560d3d395727"
dependencies = [
 "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1503,11 +1511,12 @@ dependencies = [
[[package]]
name = "rotor"
version = "0.6.3"
-source = "git+https://github.com/ethcore/rotor#e63d45137b2eb66d1e085a7c6321a5db8b187576"
+source = "git+https://github.com/ethcore/rotor#c1a2dd0046c5ea2517a5b637fca8ee2e77021e82"
dependencies = [
 "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)",
+ "mio 0.6.1 (git+https://github.com/ethcore/mio)",
 "quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
]

@@ -2008,6 +2017,7 @@ dependencies = [
"checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32"
"checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a"
"checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245"
+"checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591"
"checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc"
"checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "<none>"
"checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf"
@@ -2018,6 +2028,7 @@ dependencies = [
"checksum env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aba65b63ffcc17ffacd6cf5aa843da7c5a25e3bd4bbe0b7def8b214e411250e5"
"checksum eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)" = "<none>"
"checksum ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0c53453517f620847be51943db329276ae52f2e210cfc659e81182864be2f"
+"checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa"
"checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb"
"checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312"
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
@@ -2025,17 +2036,17 @@ dependencies = [
"checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c"
"checksum hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2da7d3a34cf6406d9d700111b8eafafe9a251de41ae71d8052748259343b58"
"checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae"
+"checksum hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)" = "<none>"
"checksum hyper 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)" = "eb27e8a3e8f17ac43ffa41bbda9cf5ad3f9f13ef66fa4873409d4902310275f7"
-"checksum hyper 0.9.4 (git+https://github.com/ethcore/hyper)" = "<none>"
"checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11"
"checksum igd 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c8c12b1795b8b168f577c45fa10379b3814dcb11b7ab702406001f0d63f40484"
"checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c"
"checksum itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "086e1fa5fe48840b1cfdef3a20c7e3115599f8d5c4c87ef32a794a7cdd184d76"
"checksum itoa 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae3088ea4baeceb0284ee9eea42f591226e6beaecf65373e41b38d95a1b8e7a1"
-"checksum json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)" = "<none>"
+"checksum jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "<none>"
-"checksum json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)" = "<none>"
+"checksum jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc.git)" = "<none>"
-"checksum jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3c5094610b07f28f3edaf3947b732dadb31dbba4941d4d0c1c7a8350208f4414"
+"checksum jsonrpc-ipc-server 0.2.4 (git+https://github.com/ethcore/jsonrpc.git)" = "<none>"
-"checksum jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc-http-server.git)" = "<none>"
+"checksum jsonrpc-tcp-server 0.1.0 (git+https://github.com/ethcore/jsonrpc.git)" = "<none>"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"
"checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f"
@@ -30,8 +30,7 @@ serde = "0.8.0"
serde_json = "0.8.0"
hyper = { version = "0.9", default-features = false }
ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" }
-json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
+fdlimit = "0.1"
-fdlimit = { path = "util/fdlimit" }
ethcore = { path = "ethcore" }
ethcore-util = { path = "util" }
ethsync = { path = "sync" }
@@ -89,4 +88,4 @@ name = "parity"
[profile.release]
debug = false
lto = false
+panic = "abort"
@@ -12,8 +12,8 @@ build = "build.rs"
rand = "0.3.14"
log = "0.3"
env_logger = "0.3"
-jsonrpc-core = "3.0"
+jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" }
-jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc-http-server.git" }
+jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" }
hyper = { default-features = false, git = "https://github.com/ethcore/hyper" }
unicase = "1.3"
url = "1.0"
@@ -1,7 +1,7 @@
[package]
description = "Base Package for all Parity built-in dapps"
name = "parity-dapps-glue"
-version = "1.4.0"
+version = "1.5.0"
license = "GPL-3.0"
authors = ["Ethcore <admin@ethcore.io"]
build = "build.rs"
@@ -16,7 +16,6 @@

//! Simple Content Handler

-use std::io::Write;
use hyper::{header, server, Decoder, Encoder, Next};
use hyper::net::HttpStream;
use hyper::mime::Mime;
@@ -58,7 +58,7 @@ pub fn extract_url(req: &server::Request<net::HttpStream>) -> Option<Url> {
				_ => None,
			}
		},
-		uri::RequestUri::AbsolutePath(ref path) => {
+		uri::RequestUri::AbsolutePath { ref path, .. } => {
			// Attempt to prepend the Host header (mandatory in HTTP/1.1)
			let url_string = match req.headers().get::<header::Host>() {
				Some(ref host) => {
@@ -266,7 +266,11 @@ impl Server {
	#[cfg(test)]
	/// Returns address that this server is bound to.
	pub fn addr(&self) -> &SocketAddr {
-		self.server.as_ref().expect("server is always Some at the start; it's consumed only when object is dropped; qed").addr()
+		self.server.as_ref()
+			.expect("server is always Some at the start; it's consumed only when object is dropped; qed")
+			.addrs()
+			.first()
+			.expect("You cannot start the server without binding to at least one address; qed")
	}
}

@@ -14,7 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::io::Write;
use time::{self, Duration};

use hyper::header;
@@ -126,7 +125,7 @@ impl<T: Dapp> PageHandler<T> {
impl<T: Dapp> server::Handler<HttpStream> for PageHandler<T> {
	fn on_request(&mut self, req: server::Request<HttpStream>) -> Next {
		self.file = match *req.uri() {
-			RequestUri::AbsolutePath(ref path) => {
+			RequestUri::AbsolutePath { ref path, .. } => {
				self.app.file(&self.extract_path(path))
			},
			RequestUri::AbsoluteUri(ref url) => {
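Note: the two hunks above (in extract_url and PageHandler::on_request) follow the bumped ethcore/hyper fork, where RequestUri::AbsolutePath changed from a tuple-style variant to a struct-style variant. A minimal, self-contained Rust sketch of that pattern change is below; the enum is only an illustrative stand-in, not hyper's actual definition, and the query field is an assumption added so `..` has something to ignore.

```rust
// Illustrative stand-in with the same shape as the updated variant.
#[allow(dead_code)]
enum RequestUri {
    AbsolutePath { path: String, query: Option<String> }, // struct-like variant
    AbsoluteUri(String),
}

fn path_of(uri: &RequestUri) -> Option<&str> {
    match *uri {
        // Old code matched a tuple variant: RequestUri::AbsolutePath(ref path)
        // New code binds the field by name and ignores the rest with `..`:
        RequestUri::AbsolutePath { ref path, .. } => Some(path.as_str()),
        _ => None,
    }
}

fn main() {
    let uri = RequestUri::AbsolutePath { path: "/index.html".into(), query: None };
    assert_eq!(path_of(&uri), Some("/index.html"));
    assert_eq!(path_of(&RequestUri::AbsoluteUri("http://example.com".into())), None);
}
```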
@@ -16,13 +16,14 @@

use std::sync::{Arc, Mutex};
use hyper;
-use jsonrpc_core::IoHandler;
-use jsonrpc_http_server::{ServerHandler, PanicHandler, AccessControlAllowOrigin};
+use jsonrpc_core::{IoHandler, ResponseHandler, Request, Response};
+use jsonrpc_http_server::{ServerHandler, PanicHandler, AccessControlAllowOrigin, RpcHandler};
use endpoint::{Endpoint, EndpointPath, Handler};

pub fn rpc(handler: Arc<IoHandler>, panic_handler: Arc<Mutex<Option<Box<Fn() -> () + Send>>>>) -> Box<Endpoint> {
	Box::new(RpcEndpoint {
-		handler: handler,
+		handler: Arc::new(RpcMiddleware::new(handler)),
		panic_handler: panic_handler,
		cors_domain: None,
		// NOTE [ToDr] We don't need to do any hosts validation here. It's already done in router.
@@ -31,7 +32,7 @@ pub fn rpc(handler: Arc<IoHandler>, panic_handler: Arc<Mutex<Option<Box<Fn() ->
}

struct RpcEndpoint {
-	handler: Arc<IoHandler>,
+	handler: Arc<RpcMiddleware>,
	panic_handler: Arc<Mutex<Option<Box<Fn() -> () + Send>>>>,
	cors_domain: Option<Vec<AccessControlAllowOrigin>>,
	allowed_hosts: Option<Vec<String>>,
@ -49,3 +50,86 @@ impl Endpoint for RpcEndpoint {
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const MIDDLEWARE_METHOD: &'static str = "eth_accounts";
|
||||||
|
|
||||||
|
struct RpcMiddleware {
|
||||||
|
handler: Arc<IoHandler>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RpcMiddleware {
|
||||||
|
fn new(handler: Arc<IoHandler>) -> Self {
|
||||||
|
RpcMiddleware {
|
||||||
|
handler: handler,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Appends additional parameter for specific calls.
|
||||||
|
fn augment_request(&self, request: &mut Request, meta: Option<Meta>) {
|
||||||
|
use jsonrpc_core::{Call, Params, to_value};
|
||||||
|
|
||||||
|
fn augment_call(call: &mut Call, meta: Option<&Meta>) {
|
||||||
|
match (call, meta) {
|
||||||
|
(&mut Call::MethodCall(ref mut method_call), Some(meta)) if &method_call.method == MIDDLEWARE_METHOD => {
|
||||||
|
let session = to_value(&meta.app_id);
|
||||||
|
|
||||||
|
let params = match method_call.params {
|
||||||
|
Some(Params::Array(ref vec)) if vec.len() == 0 => Some(Params::Array(vec![session])),
|
||||||
|
// invalid params otherwise
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
method_call.params = params;
|
||||||
|
},
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
match *request {
|
||||||
|
Request::Single(ref mut call) => augment_call(call, meta.as_ref()),
|
||||||
|
Request::Batch(ref mut vec) => {
|
||||||
|
for mut call in vec {
|
||||||
|
augment_call(call, meta.as_ref())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct Meta {
|
||||||
|
app_id: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RpcHandler for RpcMiddleware {
|
||||||
|
type Metadata = Meta;
|
||||||
|
|
||||||
|
fn read_metadata(&self, request: &hyper::server::Request<hyper::net::HttpStream>) -> Option<Self::Metadata> {
|
||||||
|
request.headers().get::<hyper::header::Referer>()
|
||||||
|
.and_then(|referer| hyper::Url::parse(referer).ok())
|
||||||
|
.and_then(|url| {
|
||||||
|
url.path_segments()
|
||||||
|
.and_then(|mut split| split.next())
|
||||||
|
.map(|app_id| Meta {
|
||||||
|
app_id: app_id.to_owned(),
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_request<H>(&self, request_str: &str, response_handler: H, meta: Option<Self::Metadata>) where
|
||||||
|
H: ResponseHandler<Option<String>, Option<String>> + 'static
|
||||||
|
{
|
||||||
|
let handler = IoHandler::convert_handler(response_handler);
|
||||||
|
let request = IoHandler::read_request(request_str);
|
||||||
|
trace!(target: "rpc", "Request metadata: {:?}", meta);
|
||||||
|
|
||||||
|
match request {
|
||||||
|
Ok(mut request) => {
|
||||||
|
self.augment_request(&mut request, meta);
|
||||||
|
self.handler.request_handler().handle_request(request, handler, None)
|
||||||
|
},
|
||||||
|
Err(error) => handler.send(Some(Response::from(error))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
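Note on the dapps RPC middleware introduced above: it reads the dapp id out of the Referer header and, for a bare `eth_accounts` call, injects that id as the call's single parameter. Below is a minimal standalone sketch of both steps; it uses serde_json values and the `url` crate in place of the real jsonrpc_core/hyper types, so the names `augment` and `app_id_from_referer` are illustrative only, not the actual API.

// Standalone sketch of the middleware's two steps, with serde_json standing in
// for the jsonrpc_core request types. Requires the `serde_json` and `url` crates
// (url is already a dependency of this tree). Illustrative only.
use serde_json::{json, Value};
use url::Url;

const MIDDLEWARE_METHOD: &str = "eth_accounts";

// Step 1: the dapp id is the first path segment of the Referer URL.
fn app_id_from_referer(referer: &str) -> Option<String> {
    Url::parse(referer).ok()
        .and_then(|url| url.path_segments().and_then(|mut split| split.next().map(str::to_owned)))
}

// Step 2: rewrite a bare `eth_accounts` call so the dapp id travels as its only
// parameter; any other parameter shape is cleared ("invalid params otherwise").
fn augment(request: &mut Value, app_id: Option<&str>) {
    if request["method"] != MIDDLEWARE_METHOD {
        return;
    }
    let id = match app_id {
        Some(id) => id,
        None => return, // no metadata: leave the call untouched
    };
    let empty = request["params"].as_array().map_or(false, |p| p.is_empty());
    request["params"] = if empty { json!([id]) } else { Value::Null };
}

fn main() {
    let app_id = app_id_from_referer("http://127.0.0.1:8080/wallet/index.html");
    assert_eq!(app_id.as_ref().map(|s| s.as_str()), Some("wallet"));

    let mut req = json!({"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1});
    augment(&mut req, app_id.as_ref().map(|s| s.as_str()));
    assert_eq!(req["params"], json!(["wallet"]));
}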
@ -3,7 +3,7 @@ description = "Ethcore Parity UI"
|
|||||||
homepage = "http://ethcore.io"
|
homepage = "http://ethcore.io"
|
||||||
license = "GPL-3.0"
|
license = "GPL-3.0"
|
||||||
name = "parity-ui"
|
name = "parity-ui"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["Ethcore <admin@ethcore.io>"]
|
authors = ["Ethcore <admin@ethcore.io>"]
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
|
@ -3,7 +3,7 @@ description = "Ethcore Database"
|
|||||||
homepage = "http://ethcore.io"
|
homepage = "http://ethcore.io"
|
||||||
license = "GPL-3.0"
|
license = "GPL-3.0"
|
||||||
name = "ethcore-db"
|
name = "ethcore-db"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["Ethcore <admin@ethcore.io>"]
|
authors = ["Ethcore <admin@ethcore.io>"]
|
||||||
build = "build.rs"
|
build = "build.rs"
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@ description = "Ethcore development/test/build tools"
|
|||||||
homepage = "http://ethcore.io"
|
homepage = "http://ethcore.io"
|
||||||
license = "GPL-3.0"
|
license = "GPL-3.0"
|
||||||
name = "ethcore-devtools"
|
name = "ethcore-devtools"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["Ethcore <admin@ethcore.io>"]
|
authors = ["Ethcore <admin@ethcore.io>"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "ethash"
|
name = "ethash"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["arkpar <arkadiy@ethcore.io"]
|
authors = ["arkpar <arkadiy@ethcore.io"]
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
|
@ -5,6 +5,10 @@ license = "GPL-3.0"
|
|||||||
name = "ethcore-light"
|
name = "ethcore-light"
|
||||||
version = "1.5.0"
|
version = "1.5.0"
|
||||||
authors = ["Ethcore <admin@ethcore.io>"]
|
authors = ["Ethcore <admin@ethcore.io>"]
|
||||||
|
build = "build.rs"
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
"ethcore-ipc-codegen" = { path = "../../ipc/codegen" }
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
log = "0.3"
|
log = "0.3"
|
||||||
@ -12,5 +16,6 @@ ethcore = { path = ".." }
|
|||||||
ethcore-util = { path = "../../util" }
|
ethcore-util = { path = "../../util" }
|
||||||
ethcore-network = { path = "../../util/network" }
|
ethcore-network = { path = "../../util/network" }
|
||||||
ethcore-io = { path = "../../util/io" }
|
ethcore-io = { path = "../../util/io" }
|
||||||
|
ethcore-ipc = { path = "../../ipc/rpc" }
|
||||||
rlp = { path = "../../util/rlp" }
|
rlp = { path = "../../util/rlp" }
|
||||||
time = "0.1"
|
time = "0.1"
|
21
ethcore/light/build.rs
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
extern crate ethcore_ipc_codegen;
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
ethcore_ipc_codegen::derive_binary("src/types/mod.rs.in").unwrap();
|
||||||
|
}
|
@ -101,7 +101,7 @@ impl Provider for Client {
|
|||||||
Vec::new()
|
Vec::new()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn code(&self, _req: request::ContractCodes) -> Vec<Bytes> {
|
fn contract_code(&self, _req: request::ContractCodes) -> Vec<Bytes> {
|
||||||
Vec::new()
|
Vec::new()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -28,20 +28,25 @@
|
|||||||
//! It starts by performing a header-only sync, verifying random samples
|
//! It starts by performing a header-only sync, verifying random samples
|
||||||
//! of members of the chain to varying degrees.
|
//! of members of the chain to varying degrees.
|
||||||
|
|
||||||
// TODO: remove when integrating with parity.
|
// TODO: remove when integrating with the rest of parity.
|
||||||
#![allow(dead_code)]
|
#![allow(dead_code)]
|
||||||
|
|
||||||
pub mod client;
|
pub mod client;
|
||||||
pub mod net;
|
pub mod net;
|
||||||
pub mod provider;
|
pub mod provider;
|
||||||
pub mod request;
|
|
||||||
|
|
||||||
extern crate ethcore_util as util;
|
mod types;
|
||||||
extern crate ethcore_network as network;
|
|
||||||
extern crate ethcore_io as io;
|
pub use self::provider::Provider;
|
||||||
extern crate ethcore;
|
pub use types::les_request as request;
|
||||||
extern crate rlp;
|
|
||||||
extern crate time;
|
|
||||||
|
|
||||||
#[macro_use]
|
#[macro_use]
|
||||||
extern crate log;
|
extern crate log;
|
||||||
|
|
||||||
|
extern crate ethcore;
|
||||||
|
extern crate ethcore_util as util;
|
||||||
|
extern crate ethcore_network as network;
|
||||||
|
extern crate ethcore_io as io;
|
||||||
|
extern crate ethcore_ipc as ipc;
|
||||||
|
extern crate rlp;
|
||||||
|
extern crate time;
|
@ -206,6 +206,39 @@ impl FlowParams {
|
|||||||
cost.0 + (amount * cost.1)
|
cost.0 + (amount * cost.1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Compute the maximum number of costs of a specific kind which can be made
|
||||||
|
/// with the given buffer.
|
||||||
|
/// Saturates at `usize::max()`. This is not a problem in practice because
|
||||||
|
/// this amount of requests is already prohibitively large.
|
||||||
|
pub fn max_amount(&self, buffer: &Buffer, kind: request::Kind) -> usize {
|
||||||
|
use util::Uint;
|
||||||
|
use std::usize;
|
||||||
|
|
||||||
|
let cost = match kind {
|
||||||
|
request::Kind::Headers => &self.costs.headers,
|
||||||
|
request::Kind::Bodies => &self.costs.bodies,
|
||||||
|
request::Kind::Receipts => &self.costs.receipts,
|
||||||
|
request::Kind::StateProofs => &self.costs.state_proofs,
|
||||||
|
request::Kind::Codes => &self.costs.contract_codes,
|
||||||
|
request::Kind::HeaderProofs => &self.costs.header_proofs,
|
||||||
|
};
|
||||||
|
|
||||||
|
let start = buffer.current();
|
||||||
|
|
||||||
|
if start <= cost.0 {
|
||||||
|
return 0;
|
||||||
|
} else if cost.1 == U256::zero() {
|
||||||
|
return usize::MAX;
|
||||||
|
}
|
||||||
|
|
||||||
|
let max = (start - cost.0) / cost.1;
|
||||||
|
if max >= usize::MAX.into() {
|
||||||
|
usize::MAX
|
||||||
|
} else {
|
||||||
|
max.as_u64() as usize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Create initial buffer parameter.
|
/// Create initial buffer parameter.
|
||||||
pub fn create_buffer(&self) -> Buffer {
|
pub fn create_buffer(&self) -> Buffer {
|
||||||
Buffer {
|
Buffer {
|
||||||
@ -228,6 +261,16 @@ impl FlowParams {
|
|||||||
|
|
||||||
buf.estimate = ::std::cmp::min(self.limit, buf.estimate + (elapsed * self.recharge));
|
buf.estimate = ::std::cmp::min(self.limit, buf.estimate + (elapsed * self.recharge));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Refund some buffer which was previously deducted.
|
||||||
|
/// Does not update the recharge timestamp.
|
||||||
|
pub fn refund(&self, buf: &mut Buffer, refund_amount: U256) {
|
||||||
|
buf.estimate = buf.estimate + refund_amount;
|
||||||
|
|
||||||
|
if buf.estimate > self.limit {
|
||||||
|
buf.estimate = self.limit
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
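The new `max_amount` and `refund` helpers round out the linear flow-control model above: a request for `n` items of some kind costs `base + n * per_item`, `max_amount` inverts that against the peer's current buffer (saturating instead of overflowing), and `refund` credits unused buffer back up to the limit. A tiny self-contained sketch of the same arithmetic, with `u64` standing in for `U256` and made-up cost numbers:

// Sketch of the buffer-flow arithmetic using u64 instead of U256.
// `Cost(base, per_item)` mirrors the (cost.0, cost.1) pairs above.
#[derive(Clone, Copy)]
struct Cost(u64, u64);

struct Flow {
    limit: u64, // maximum buffer a peer may accumulate
    cost: Cost, // cost table entry for one request kind
}

impl Flow {
    // cost.0 + amount * cost.1
    fn compute_cost(&self, amount: u64) -> u64 {
        self.cost.0 + amount * self.cost.1
    }

    // Largest `amount` such that compute_cost(amount) <= buffer.
    fn max_amount(&self, buffer: u64) -> u64 {
        if buffer <= self.cost.0 {
            0
        } else if self.cost.1 == 0 {
            u64::max_value() // zero per-item cost: effectively unlimited
        } else {
            (buffer - self.cost.0) / self.cost.1
        }
    }

    // Credit back an over-deducted amount, never exceeding the limit.
    fn refund(&self, buffer: &mut u64, amount: u64) {
        *buffer = (*buffer + amount).min(self.limit);
    }
}

fn main() {
    let flow = Flow { limit: 1_000, cost: Cost(100, 10) };
    let mut buffer = 450;
    assert_eq!(flow.compute_cost(20), 300);
    assert_eq!(flow.max_amount(buffer), 35); // (450 - 100) / 10
    flow.refund(&mut buffer, 700);
    assert_eq!(buffer, 1_000);               // clamped at the limit
}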
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
|
@ -52,6 +52,8 @@ pub enum Error {
|
|||||||
UnexpectedHandshake,
|
UnexpectedHandshake,
|
||||||
/// Peer on wrong network (wrong NetworkId or genesis hash)
|
/// Peer on wrong network (wrong NetworkId or genesis hash)
|
||||||
WrongNetwork,
|
WrongNetwork,
|
||||||
|
/// Unknown peer.
|
||||||
|
UnknownPeer,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Error {
|
impl Error {
|
||||||
@ -64,6 +66,7 @@ impl Error {
|
|||||||
Error::UnrecognizedPacket(_) => Punishment::Disconnect,
|
Error::UnrecognizedPacket(_) => Punishment::Disconnect,
|
||||||
Error::UnexpectedHandshake => Punishment::Disconnect,
|
Error::UnexpectedHandshake => Punishment::Disconnect,
|
||||||
Error::WrongNetwork => Punishment::Disable,
|
Error::WrongNetwork => Punishment::Disable,
|
||||||
|
Error::UnknownPeer => Punishment::Disconnect,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -89,6 +92,7 @@ impl fmt::Display for Error {
|
|||||||
Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code),
|
Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code),
|
||||||
Error::UnexpectedHandshake => write!(f, "Unexpected handshake"),
|
Error::UnexpectedHandshake => write!(f, "Unexpected handshake"),
|
||||||
Error::WrongNetwork => write!(f, "Wrong network"),
|
Error::WrongNetwork => write!(f, "Wrong network"),
|
||||||
|
Error::UnknownPeer => write!(f, "unknown peer"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -19,27 +19,28 @@
|
|||||||
//! This uses a "Provider" to answer requests.
|
//! This uses a "Provider" to answer requests.
|
||||||
//! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES)
|
//! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES)
|
||||||
|
|
||||||
|
use ethcore::transaction::SignedTransaction;
|
||||||
use io::TimerToken;
|
use io::TimerToken;
|
||||||
use network::{NetworkProtocolHandler, NetworkContext, NetworkError, PeerId};
|
use network::{NetworkProtocolHandler, NetworkContext, NetworkError, PeerId};
|
||||||
use rlp::{RlpStream, Stream, UntrustedRlp, View};
|
use rlp::{RlpStream, Stream, UntrustedRlp, View};
|
||||||
use util::hash::H256;
|
use util::hash::H256;
|
||||||
use util::RwLock;
|
use util::{Mutex, RwLock, U256};
|
||||||
|
use time::SteadyTime;
|
||||||
|
|
||||||
use std::collections::{HashMap, HashSet};
|
use std::collections::{HashMap, HashSet};
|
||||||
use std::sync::atomic::AtomicUsize;
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
|
||||||
use provider::Provider;
|
use provider::Provider;
|
||||||
use request::{self, Request};
|
use request::{self, Request};
|
||||||
|
|
||||||
use self::buffer_flow::{Buffer, FlowParams};
|
use self::buffer_flow::{Buffer, FlowParams};
|
||||||
use self::error::{Error, Punishment};
|
use self::error::{Error, Punishment};
|
||||||
use self::status::{Status, Capabilities};
|
|
||||||
|
|
||||||
mod buffer_flow;
|
mod buffer_flow;
|
||||||
mod error;
|
mod error;
|
||||||
mod status;
|
mod status;
|
||||||
|
|
||||||
pub use self::status::Announcement;
|
pub use self::status::{Status, Capabilities, Announcement, NetworkId};
|
||||||
|
|
||||||
const TIMEOUT: TimerToken = 0;
|
const TIMEOUT: TimerToken = 0;
|
||||||
const TIMEOUT_INTERVAL_MS: u64 = 1000;
|
const TIMEOUT_INTERVAL_MS: u64 = 1000;
|
||||||
@ -86,6 +87,10 @@ mod packet {
|
|||||||
pub const HEADER_PROOFS: u8 = 0x0e;
|
pub const HEADER_PROOFS: u8 = 0x0e;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// A request id.
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||||
|
pub struct ReqId(usize);
|
||||||
|
|
||||||
// A pending peer: one we've sent our status to but
|
// A pending peer: one we've sent our status to but
|
||||||
// may not have received one for.
|
// may not have received one for.
|
||||||
struct PendingPeer {
|
struct PendingPeer {
|
||||||
@ -103,32 +108,162 @@ struct Peer {
|
|||||||
sent_head: H256, // last head we've given them.
|
sent_head: H256, // last head we've given them.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Peer {
|
||||||
|
// check the maximum cost of a request, returning an error if there's
|
||||||
|
// not enough buffer left.
|
||||||
|
// returns the calculated maximum cost.
|
||||||
|
fn deduct_max(&mut self, flow_params: &FlowParams, kind: request::Kind, max: usize) -> Result<U256, Error> {
|
||||||
|
flow_params.recharge(&mut self.local_buffer);
|
||||||
|
|
||||||
|
let max_cost = flow_params.compute_cost(kind, max);
|
||||||
|
try!(self.local_buffer.deduct_cost(max_cost));
|
||||||
|
Ok(max_cost)
|
||||||
|
}
|
||||||
|
|
||||||
|
// refund buffer for a request. returns new buffer amount.
|
||||||
|
fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 {
|
||||||
|
flow_params.refund(&mut self.local_buffer, amount);
|
||||||
|
|
||||||
|
self.local_buffer.current()
|
||||||
|
}
|
||||||
|
|
||||||
|
// recharge remote buffer with remote flow params.
|
||||||
|
fn recharge_remote(&mut self) {
|
||||||
|
let flow = &mut self.remote_flow;
|
||||||
|
flow.recharge(&mut self.remote_buffer);
|
||||||
|
}
|
||||||
|
}
|
||||||
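The `deduct_max`/`refund` pair above captures the accounting discipline every request handler below follows: charge the worst-case cost before doing any work, then hand back the difference once the actual response size is known. A hedged sketch of that pattern, with plain `u64` stand-ins for the real `Buffer` and `FlowParams` and an invented cost function:

// Sketch of the deduct-then-refund accounting used when serving a request.
struct Buffer {
    current: u64,
    limit: u64,
}

#[derive(Debug)]
struct NotEnoughBuffer;

impl Buffer {
    fn deduct(&mut self, cost: u64) -> Result<(), NotEnoughBuffer> {
        if self.current < cost {
            Err(NotEnoughBuffer)
        } else {
            self.current -= cost;
            Ok(())
        }
    }

    fn refund(&mut self, amount: u64) {
        self.current = (self.current + amount).min(self.limit);
    }
}

// Stand-in for FlowParams::compute_cost for one request kind.
fn cost_of(items: u64) -> u64 {
    50 + items * 10
}

fn serve(buffer: &mut Buffer, requested: u64, actually_returned: u64) -> Result<u64, NotEnoughBuffer> {
    let max_cost = cost_of(requested);            // deduct_max
    buffer.deduct(max_cost)?;
    let actual_cost = cost_of(actually_returned); // never exceeds max_cost
    buffer.refund(max_cost - actual_cost);        // refund the unused part
    Ok(buffer.current)                            // echoed back in the response
}

fn main() {
    let mut buf = Buffer { current: 500, limit: 500 };
    // Peer asks for 20 headers, we can only return 12.
    let remaining = serve(&mut buf, 20, 12).unwrap();
    assert_eq!(remaining, 500 - cost_of(12));
}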
|
|
||||||
|
/// An LES event handler.
|
||||||
|
pub trait Handler: Send + Sync {
|
||||||
|
/// Called when a peer connects.
|
||||||
|
fn on_connect(&self, _id: PeerId, _status: &Status, _capabilities: &Capabilities) { }
|
||||||
|
/// Called when a peer disconnects
|
||||||
|
fn on_disconnect(&self, _id: PeerId) { }
|
||||||
|
/// Called when a peer makes an announcement.
|
||||||
|
fn on_announcement(&self, _id: PeerId, _announcement: &Announcement) { }
|
||||||
|
/// Called when a peer requests relay of some transactions.
|
||||||
|
fn on_transactions(&self, _id: PeerId, _relay: &[SignedTransaction]) { }
|
||||||
|
}
|
||||||
|
|
||||||
|
// a request and the time it was made.
|
||||||
|
struct Requested {
|
||||||
|
request: Request,
|
||||||
|
timestamp: SteadyTime,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Protocol parameters.
|
||||||
|
pub struct Params {
|
||||||
|
/// Genesis hash.
|
||||||
|
pub genesis_hash: H256,
|
||||||
|
/// Network id.
|
||||||
|
pub network_id: NetworkId,
|
||||||
|
/// Buffer flow parameters.
|
||||||
|
pub flow_params: FlowParams,
|
||||||
|
/// Initial capabilities.
|
||||||
|
pub capabilities: Capabilities,
|
||||||
|
}
|
||||||
|
|
||||||
/// This is an implementation of the light ethereum network protocol, abstracted
|
/// This is an implementation of the light ethereum network protocol, abstracted
|
||||||
/// over a `Provider` of data and a p2p network.
|
/// over a `Provider` of data and a p2p network.
|
||||||
///
|
///
|
||||||
/// This is simply designed for request-response purposes. Higher level uses
|
/// This is simply designed for request-response purposes. Higher level uses
|
||||||
/// of the protocol, such as synchronization, will function as wrappers around
|
/// of the protocol, such as synchronization, will function as wrappers around
|
||||||
/// this system.
|
/// this system.
|
||||||
|
//
|
||||||
|
// LOCK ORDER:
|
||||||
|
// Locks must be acquired in the order declared, and when holding a read lock
|
||||||
|
// on the peers, only one peer may be held at a time.
|
||||||
pub struct LightProtocol {
|
pub struct LightProtocol {
|
||||||
provider: Box<Provider>,
|
provider: Box<Provider>,
|
||||||
genesis_hash: H256,
|
genesis_hash: H256,
|
||||||
network_id: status::NetworkId,
|
network_id: NetworkId,
|
||||||
pending_peers: RwLock<HashMap<PeerId, PendingPeer>>,
|
pending_peers: RwLock<HashMap<PeerId, PendingPeer>>,
|
||||||
peers: RwLock<HashMap<PeerId, Peer>>,
|
peers: RwLock<HashMap<PeerId, Mutex<Peer>>>,
|
||||||
pending_requests: RwLock<HashMap<usize, Request>>,
|
pending_requests: RwLock<HashMap<usize, Requested>>,
|
||||||
capabilities: RwLock<Capabilities>,
|
capabilities: RwLock<Capabilities>,
|
||||||
flow_params: FlowParams, // assumed static and same for every peer.
|
flow_params: FlowParams, // assumed static and same for every peer.
|
||||||
|
handlers: Vec<Box<Handler>>,
|
||||||
req_id: AtomicUsize,
|
req_id: AtomicUsize,
|
||||||
}
|
}
|
||||||
|
|
||||||
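A note on the new `peers: RwLock<HashMap<PeerId, Mutex<Peer>>>` shape and the LOCK ORDER comment: the outer map is only read-locked on the request path, and per-peer mutation happens under that peer's own mutex, so unrelated peers never contend. A sketch of the same shape using std locks (the real code uses the util crate's lock types, which do not return poisoned `Result`s):

// Sketch of the read-lock-map, lock-one-peer pattern.
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

type PeerId = usize;

struct Peer {
    buffer: u64,
}

struct Protocol {
    peers: RwLock<HashMap<PeerId, Mutex<Peer>>>,
}

impl Protocol {
    fn serve(&self, id: PeerId) -> Option<u64> {
        let peers = self.peers.read().unwrap(); // many requests may hold this concurrently
        peers.get(&id).map(|peer| {
            let mut peer = peer.lock().unwrap(); // exclusive only for this one peer
            peer.buffer = peer.buffer.saturating_sub(10);
            peer.buffer
        })
    }
}

fn main() {
    let protocol = Protocol { peers: RwLock::new(HashMap::new()) };
    protocol.peers.write().unwrap().insert(1, Mutex::new(Peer { buffer: 100 }));
    assert_eq!(protocol.serve(1), Some(90));
    assert_eq!(protocol.serve(2), None); // unknown peer
}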
impl LightProtocol {
|
impl LightProtocol {
|
||||||
|
/// Create a new instance of the protocol manager.
|
||||||
|
pub fn new(provider: Box<Provider>, params: Params) -> Self {
|
||||||
|
LightProtocol {
|
||||||
|
provider: provider,
|
||||||
|
genesis_hash: params.genesis_hash,
|
||||||
|
network_id: params.network_id,
|
||||||
|
pending_peers: RwLock::new(HashMap::new()),
|
||||||
|
peers: RwLock::new(HashMap::new()),
|
||||||
|
pending_requests: RwLock::new(HashMap::new()),
|
||||||
|
capabilities: RwLock::new(params.capabilities),
|
||||||
|
flow_params: params.flow_params,
|
||||||
|
handlers: Vec::new(),
|
||||||
|
req_id: AtomicUsize::new(0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check the maximum amount of requests of a specific type
|
||||||
|
/// which a peer would be able to serve.
|
||||||
|
pub fn max_requests(&self, peer: PeerId, kind: request::Kind) -> Option<usize> {
|
||||||
|
self.peers.read().get(&peer).map(|peer| {
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
peer.recharge_remote();
|
||||||
|
peer.remote_flow.max_amount(&peer.remote_buffer, kind)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Make a request to a peer.
|
||||||
|
///
|
||||||
|
/// Fails on: nonexistent peer, network error,
|
||||||
|
/// insufficient buffer. Does not check capabilities before sending.
|
||||||
|
/// On success, returns a request id which can later be coordinated
|
||||||
|
/// with an event.
|
||||||
|
pub fn request_from(&self, io: &NetworkContext, peer_id: &PeerId, request: Request) -> Result<ReqId, Error> {
|
||||||
|
let peers = self.peers.read();
|
||||||
|
let peer = try!(peers.get(peer_id).ok_or_else(|| Error::UnknownPeer));
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
|
peer.recharge_remote();
|
||||||
|
|
||||||
|
let max = peer.remote_flow.compute_cost(request.kind(), request.amount());
|
||||||
|
try!(peer.remote_buffer.deduct_cost(max));
|
||||||
|
|
||||||
|
let req_id = self.req_id.fetch_add(1, Ordering::SeqCst);
|
||||||
|
let packet_data = encode_request(&request, req_id);
|
||||||
|
|
||||||
|
let packet_id = match request.kind() {
|
||||||
|
request::Kind::Headers => packet::GET_BLOCK_HEADERS,
|
||||||
|
request::Kind::Bodies => packet::GET_BLOCK_BODIES,
|
||||||
|
request::Kind::Receipts => packet::GET_RECEIPTS,
|
||||||
|
request::Kind::StateProofs => packet::GET_PROOFS,
|
||||||
|
request::Kind::Codes => packet::GET_CONTRACT_CODES,
|
||||||
|
request::Kind::HeaderProofs => packet::GET_HEADER_PROOFS,
|
||||||
|
};
|
||||||
|
|
||||||
|
try!(io.send(*peer_id, packet_id, packet_data));
|
||||||
|
|
||||||
|
peer.current_asking.insert(req_id);
|
||||||
|
self.pending_requests.write().insert(req_id, Requested {
|
||||||
|
request: request,
|
||||||
|
timestamp: SteadyTime::now(),
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(ReqId(req_id))
|
||||||
|
}
|
||||||
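`request_from` above stamps each outbound request with a `ReqId` taken from `self.req_id.fetch_add(1, Ordering::SeqCst)`, so ids stay unique even when several callers dispatch requests to different peers concurrently. A small sketch of just that allocation:

// Sketch of request-id allocation from a shared atomic counter.
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ReqId(usize);

fn next_id(counter: &AtomicUsize) -> ReqId {
    ReqId(counter.fetch_add(1, Ordering::SeqCst))
}

fn main() {
    let counter = Arc::new(AtomicUsize::new(0));
    let handles: Vec<_> = (0..4).map(|_| {
        let counter = counter.clone();
        thread::spawn(move || (0..100).map(|_| next_id(&counter)).collect::<Vec<_>>())
    }).collect();

    let mut ids: Vec<ReqId> = handles.into_iter().flat_map(|h| h.join().unwrap()).collect();
    ids.sort_by_key(|id| id.0);
    ids.dedup();
    assert_eq!(ids.len(), 400); // no duplicates across threads
}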
|
|
||||||
/// Make an announcement of new chain head and capabilities to all peers.
|
/// Make an announcement of new chain head and capabilities to all peers.
|
||||||
/// The announcement is expected to be valid.
|
/// The announcement is expected to be valid.
|
||||||
pub fn make_announcement(&self, mut announcement: Announcement, io: &NetworkContext) {
|
pub fn make_announcement(&self, io: &NetworkContext, mut announcement: Announcement) {
|
||||||
let mut reorgs_map = HashMap::new();
|
let mut reorgs_map = HashMap::new();
|
||||||
|
|
||||||
|
// update stored capabilities
|
||||||
|
self.capabilities.write().update_from(&announcement);
|
||||||
|
|
||||||
// calculate reorg info and send packets
|
// calculate reorg info and send packets
|
||||||
for (peer_id, peer_info) in self.peers.write().iter_mut() {
|
for (peer_id, peer_info) in self.peers.read().iter() {
|
||||||
|
let mut peer_info = peer_info.lock();
|
||||||
let reorg_depth = reorgs_map.entry(peer_info.sent_head)
|
let reorg_depth = reorgs_map.entry(peer_info.sent_head)
|
||||||
.or_insert_with(|| {
|
.or_insert_with(|| {
|
||||||
match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) {
|
match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) {
|
||||||
@ -151,6 +286,14 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Add an event handler.
|
||||||
|
/// Ownership will be transferred to the protocol structure,
|
||||||
|
/// and the handler will be kept alive as long as it is.
|
||||||
|
/// These are intended to be added at the beginning of the
|
||||||
|
pub fn add_handler(&mut self, handler: Box<Handler>) {
|
||||||
|
self.handlers.push(handler);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LightProtocol {
|
impl LightProtocol {
|
||||||
@ -173,7 +316,11 @@ impl LightProtocol {
|
|||||||
fn on_disconnect(&self, peer: PeerId) {
|
fn on_disconnect(&self, peer: PeerId) {
|
||||||
// TODO: reassign all requests assigned to this peer.
|
// TODO: reassign all requests assigned to this peer.
|
||||||
self.pending_peers.write().remove(&peer);
|
self.pending_peers.write().remove(&peer);
|
||||||
self.peers.write().remove(&peer);
|
if self.peers.write().remove(&peer).is_some() {
|
||||||
|
for handler in &self.handlers {
|
||||||
|
handler.on_disconnect(peer)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// send status to a peer.
|
// send status to a peer.
|
||||||
@ -219,15 +366,19 @@ impl LightProtocol {
|
|||||||
return Err(Error::WrongNetwork);
|
return Err(Error::WrongNetwork);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.peers.write().insert(*peer, Peer {
|
self.peers.write().insert(*peer, Mutex::new(Peer {
|
||||||
local_buffer: self.flow_params.create_buffer(),
|
local_buffer: self.flow_params.create_buffer(),
|
||||||
remote_buffer: flow_params.create_buffer(),
|
remote_buffer: flow_params.create_buffer(),
|
||||||
current_asking: HashSet::new(),
|
current_asking: HashSet::new(),
|
||||||
status: status,
|
status: status.clone(),
|
||||||
capabilities: capabilities,
|
capabilities: capabilities.clone(),
|
||||||
remote_flow: flow_params,
|
remote_flow: flow_params,
|
||||||
sent_head: pending.sent_head,
|
sent_head: pending.sent_head,
|
||||||
});
|
}));
|
||||||
|
|
||||||
|
for handler in &self.handlers {
|
||||||
|
handler.on_connect(*peer, &status, &capabilities)
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -240,13 +391,15 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let announcement = try!(status::parse_announcement(data));
|
let announcement = try!(status::parse_announcement(data));
|
||||||
let mut peers = self.peers.write();
|
let peers = self.peers.read();
|
||||||
|
|
||||||
let peer_info = match peers.get_mut(peer) {
|
let peer_info = match peers.get(peer) {
|
||||||
Some(info) => info,
|
Some(info) => info,
|
||||||
None => return Ok(()),
|
None => return Ok(()),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let mut peer_info = peer_info.lock();
|
||||||
|
|
||||||
// update status.
|
// update status.
|
||||||
{
|
{
|
||||||
// TODO: punish peer if they've moved backwards.
|
// TODO: punish peer if they've moved backwards.
|
||||||
@ -259,15 +412,11 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// update capabilities.
|
// update capabilities.
|
||||||
{
|
peer_info.capabilities.update_from(&announcement);
|
||||||
let caps = &mut peer_info.capabilities;
|
|
||||||
caps.serve_headers = caps.serve_headers || announcement.serve_headers;
|
|
||||||
caps.serve_state_since = caps.serve_state_since.or(announcement.serve_state_since);
|
|
||||||
caps.serve_chain_since = caps.serve_chain_since.or(announcement.serve_chain_since);
|
|
||||||
caps.tx_relay = caps.tx_relay || announcement.tx_relay;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: notify listeners if new best block.
|
for handler in &self.handlers {
|
||||||
|
handler.on_announcement(*peer, &announcement);
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -276,45 +425,39 @@ impl LightProtocol {
|
|||||||
fn get_block_headers(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
fn get_block_headers(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
const MAX_HEADERS: usize = 512;
|
const MAX_HEADERS: usize = 512;
|
||||||
|
|
||||||
let mut present_buffer = match self.peers.read().get(peer) {
|
let peers = self.peers.read();
|
||||||
Some(peer) => peer.local_buffer.clone(),
|
let peer = match peers.get(peer) {
|
||||||
|
Some(peer) => peer,
|
||||||
None => {
|
None => {
|
||||||
debug!(target: "les", "Ignoring announcement from unknown peer");
|
debug!(target: "les", "Ignoring request from unknown peer");
|
||||||
return Ok(())
|
return Ok(())
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
self.flow_params.recharge(&mut present_buffer);
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
let req_id: u64 = try!(data.val_at(0));
|
let req_id: u64 = try!(data.val_at(0));
|
||||||
|
|
||||||
|
let block = {
|
||||||
|
let rlp = try!(data.at(1));
|
||||||
|
(try!(rlp.val_at(0)), try!(rlp.val_at(1)))
|
||||||
|
};
|
||||||
|
|
||||||
let req = request::Headers {
|
let req = request::Headers {
|
||||||
block: {
|
block_num: block.0,
|
||||||
let rlp = try!(data.at(1));
|
block_hash: block.1,
|
||||||
(try!(rlp.val_at(0)), try!(rlp.val_at(1)))
|
|
||||||
},
|
|
||||||
max: ::std::cmp::min(MAX_HEADERS, try!(data.val_at(2))),
|
max: ::std::cmp::min(MAX_HEADERS, try!(data.val_at(2))),
|
||||||
skip: try!(data.val_at(3)),
|
skip: try!(data.val_at(3)),
|
||||||
reverse: try!(data.val_at(4)),
|
reverse: try!(data.val_at(4)),
|
||||||
};
|
};
|
||||||
|
|
||||||
let max_cost = self.flow_params.compute_cost(request::Kind::Headers, req.max);
|
let max_cost = try!(peer.deduct_max(&self.flow_params, request::Kind::Headers, req.max));
|
||||||
try!(present_buffer.deduct_cost(max_cost));
|
|
||||||
|
|
||||||
let response = self.provider.block_headers(req);
|
let response = self.provider.block_headers(req);
|
||||||
let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len());
|
let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len());
|
||||||
|
assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
|
||||||
|
|
||||||
let cur_buffer = match self.peers.write().get_mut(peer) {
|
let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
|
||||||
Some(peer) => {
|
|
||||||
self.flow_params.recharge(&mut peer.local_buffer);
|
|
||||||
try!(peer.local_buffer.deduct_cost(actual_cost));
|
|
||||||
peer.local_buffer.current()
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
debug!(target: "les", "peer disconnected during serving of request.");
|
|
||||||
return Ok(())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
io.respond(packet::BLOCK_HEADERS, {
|
io.respond(packet::BLOCK_HEADERS, {
|
||||||
let mut stream = RlpStream::new_list(response.len() + 2);
|
let mut stream = RlpStream::new_list(response.len() + 2);
|
||||||
stream.append(&req_id).append(&cur_buffer);
|
stream.append(&req_id).append(&cur_buffer);
|
||||||
@ -336,39 +479,30 @@ impl LightProtocol {
|
|||||||
fn get_block_bodies(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
fn get_block_bodies(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
const MAX_BODIES: usize = 256;
|
const MAX_BODIES: usize = 256;
|
||||||
|
|
||||||
let mut present_buffer = match self.peers.read().get(peer) {
|
let peers = self.peers.read();
|
||||||
Some(peer) => peer.local_buffer.clone(),
|
let peer = match peers.get(peer) {
|
||||||
|
Some(peer) => peer,
|
||||||
None => {
|
None => {
|
||||||
debug!(target: "les", "Ignoring announcement from unknown peer");
|
debug!(target: "les", "Ignoring request from unknown peer");
|
||||||
return Ok(())
|
return Ok(())
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
self.flow_params.recharge(&mut present_buffer);
|
|
||||||
let req_id: u64 = try!(data.val_at(0));
|
let req_id: u64 = try!(data.val_at(0));
|
||||||
|
|
||||||
let req = request::Bodies {
|
let req = request::Bodies {
|
||||||
block_hashes: try!(data.iter().skip(1).take(MAX_BODIES).map(|x| x.as_val()).collect())
|
block_hashes: try!(data.iter().skip(1).take(MAX_BODIES).map(|x| x.as_val()).collect())
|
||||||
};
|
};
|
||||||
|
|
||||||
let max_cost = self.flow_params.compute_cost(request::Kind::Bodies, req.block_hashes.len());
|
let max_cost = try!(peer.deduct_max(&self.flow_params, request::Kind::Bodies, req.block_hashes.len()));
|
||||||
try!(present_buffer.deduct_cost(max_cost));
|
|
||||||
|
|
||||||
let response = self.provider.block_bodies(req);
|
let response = self.provider.block_bodies(req);
|
||||||
let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count();
|
let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count();
|
||||||
let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len);
|
let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len);
|
||||||
|
assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
|
||||||
|
|
||||||
let cur_buffer = match self.peers.write().get_mut(peer) {
|
let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
|
||||||
Some(peer) => {
|
|
||||||
self.flow_params.recharge(&mut peer.local_buffer);
|
|
||||||
try!(peer.local_buffer.deduct_cost(actual_cost));
|
|
||||||
peer.local_buffer.current()
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
debug!(target: "les", "peer disconnected during serving of request.");
|
|
||||||
return Ok(())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
io.respond(packet::BLOCK_BODIES, {
|
io.respond(packet::BLOCK_BODIES, {
|
||||||
let mut stream = RlpStream::new_list(response.len() + 2);
|
let mut stream = RlpStream::new_list(response.len() + 2);
|
||||||
@ -388,8 +522,44 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handle a request for receipts.
|
// Handle a request for receipts.
|
||||||
fn get_receipts(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> {
|
fn get_receipts(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
unimplemented!()
|
const MAX_RECEIPTS: usize = 256;
|
||||||
|
|
||||||
|
let peers = self.peers.read();
|
||||||
|
let peer = match peers.get(peer) {
|
||||||
|
Some(peer) => peer,
|
||||||
|
None => {
|
||||||
|
debug!(target: "les", "Ignoring request from unknown peer");
|
||||||
|
return Ok(())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
|
let req_id: u64 = try!(data.val_at(0));
|
||||||
|
|
||||||
|
let req = request::Receipts {
|
||||||
|
block_hashes: try!(data.iter().skip(1).take(MAX_RECEIPTS).map(|x| x.as_val()).collect())
|
||||||
|
};
|
||||||
|
|
||||||
|
let max_cost = try!(peer.deduct_max(&self.flow_params, request::Kind::Receipts, req.block_hashes.len()));
|
||||||
|
|
||||||
|
let response = self.provider.receipts(req);
|
||||||
|
let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count();
|
||||||
|
let actual_cost = self.flow_params.compute_cost(request::Kind::Receipts, response_len);
|
||||||
|
assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
|
||||||
|
|
||||||
|
let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
|
||||||
|
|
||||||
|
io.respond(packet::RECEIPTS, {
|
||||||
|
let mut stream = RlpStream::new_list(response.len() + 2);
|
||||||
|
stream.append(&req_id).append(&cur_buffer);
|
||||||
|
|
||||||
|
for receipts in response {
|
||||||
|
stream.append_raw(&receipts, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Receive a response for receipts.
|
// Receive a response for receipts.
|
||||||
@ -398,8 +568,55 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handle a request for proofs.
|
// Handle a request for proofs.
|
||||||
fn get_proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> {
|
fn get_proofs(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
unimplemented!()
|
const MAX_PROOFS: usize = 128;
|
||||||
|
|
||||||
|
let peers = self.peers.read();
|
||||||
|
let peer = match peers.get(peer) {
|
||||||
|
Some(peer) => peer,
|
||||||
|
None => {
|
||||||
|
debug!(target: "les", "Ignoring request from unknown peer");
|
||||||
|
return Ok(())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
|
let req_id: u64 = try!(data.val_at(0));
|
||||||
|
|
||||||
|
let req = {
|
||||||
|
let requests: Result<Vec<_>, Error> = data.iter().skip(1).take(MAX_PROOFS).map(|x| {
|
||||||
|
Ok(request::StateProof {
|
||||||
|
block: try!(x.val_at(0)),
|
||||||
|
key1: try!(x.val_at(1)),
|
||||||
|
key2: if try!(x.at(2)).is_empty() { None } else { Some(try!(x.val_at(2))) },
|
||||||
|
from_level: try!(x.val_at(3)),
|
||||||
|
})
|
||||||
|
}).collect();
|
||||||
|
|
||||||
|
request::StateProofs {
|
||||||
|
requests: try!(requests),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let max_cost = try!(peer.deduct_max(&self.flow_params, request::Kind::StateProofs, req.requests.len()));
|
||||||
|
|
||||||
|
let response = self.provider.proofs(req);
|
||||||
|
let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count();
|
||||||
|
let actual_cost = self.flow_params.compute_cost(request::Kind::StateProofs, response_len);
|
||||||
|
assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
|
||||||
|
|
||||||
|
let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
|
||||||
|
|
||||||
|
io.respond(packet::PROOFS, {
|
||||||
|
let mut stream = RlpStream::new_list(response.len() + 2);
|
||||||
|
stream.append(&req_id).append(&cur_buffer);
|
||||||
|
|
||||||
|
for proof in response {
|
||||||
|
stream.append_raw(&proof, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Receive a response for proofs.
|
// Receive a response for proofs.
|
||||||
@ -408,8 +625,53 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handle a request for contract code.
|
// Handle a request for contract code.
|
||||||
fn get_contract_code(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> {
|
fn get_contract_code(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
unimplemented!()
|
const MAX_CODES: usize = 256;
|
||||||
|
|
||||||
|
let peers = self.peers.read();
|
||||||
|
let peer = match peers.get(peer) {
|
||||||
|
Some(peer) => peer,
|
||||||
|
None => {
|
||||||
|
debug!(target: "les", "Ignoring request from unknown peer");
|
||||||
|
return Ok(())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
|
let req_id: u64 = try!(data.val_at(0));
|
||||||
|
|
||||||
|
let req = {
|
||||||
|
let requests: Result<Vec<_>, Error> = data.iter().skip(1).take(MAX_CODES).map(|x| {
|
||||||
|
Ok(request::ContractCode {
|
||||||
|
block_hash: try!(x.val_at(0)),
|
||||||
|
account_key: try!(x.val_at(1)),
|
||||||
|
})
|
||||||
|
}).collect();
|
||||||
|
|
||||||
|
request::ContractCodes {
|
||||||
|
code_requests: try!(requests),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let max_cost = try!(peer.deduct_max(&self.flow_params, request::Kind::Codes, req.code_requests.len()));
|
||||||
|
|
||||||
|
let response = self.provider.contract_code(req);
|
||||||
|
let response_len = response.iter().filter(|x| !x.is_empty()).count();
|
||||||
|
let actual_cost = self.flow_params.compute_cost(request::Kind::Codes, response_len);
|
||||||
|
assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
|
||||||
|
|
||||||
|
let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
|
||||||
|
|
||||||
|
io.respond(packet::CONTRACT_CODES, {
|
||||||
|
let mut stream = RlpStream::new_list(response.len() + 2);
|
||||||
|
stream.append(&req_id).append(&cur_buffer);
|
||||||
|
|
||||||
|
for code in response {
|
||||||
|
stream.append_raw(&code, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Receive a response for contract code.
|
// Receive a response for contract code.
|
||||||
@ -418,8 +680,54 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Handle a request for header proofs
|
// Handle a request for header proofs
|
||||||
fn get_header_proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> {
|
fn get_header_proofs(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
unimplemented!()
|
const MAX_PROOFS: usize = 256;
|
||||||
|
|
||||||
|
let peers = self.peers.read();
|
||||||
|
let peer = match peers.get(peer) {
|
||||||
|
Some(peer) => peer,
|
||||||
|
None => {
|
||||||
|
debug!(target: "les", "Ignoring request from unknown peer");
|
||||||
|
return Ok(())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
|
let req_id: u64 = try!(data.val_at(0));
|
||||||
|
|
||||||
|
let req = {
|
||||||
|
let requests: Result<Vec<_>, Error> = data.iter().skip(1).take(MAX_PROOFS).map(|x| {
|
||||||
|
Ok(request::HeaderProof {
|
||||||
|
cht_number: try!(x.val_at(0)),
|
||||||
|
block_number: try!(x.val_at(1)),
|
||||||
|
from_level: try!(x.val_at(2)),
|
||||||
|
})
|
||||||
|
}).collect();
|
||||||
|
|
||||||
|
request::HeaderProofs {
|
||||||
|
requests: try!(requests),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let max_cost = try!(peer.deduct_max(&self.flow_params, request::Kind::HeaderProofs, req.requests.len()));
|
||||||
|
|
||||||
|
let response = self.provider.header_proofs(req);
|
||||||
|
let response_len = response.iter().filter(|x| &x[..] != ::rlp::EMPTY_LIST_RLP).count();
|
||||||
|
let actual_cost = self.flow_params.compute_cost(request::Kind::HeaderProofs, response_len);
|
||||||
|
assert!(max_cost >= actual_cost, "Actual cost exceeded maximum computed cost.");
|
||||||
|
|
||||||
|
let cur_buffer = peer.refund(&self.flow_params, max_cost - actual_cost);
|
||||||
|
|
||||||
|
io.respond(packet::HEADER_PROOFS, {
|
||||||
|
let mut stream = RlpStream::new_list(response.len() + 2);
|
||||||
|
stream.append(&req_id).append(&cur_buffer);
|
||||||
|
|
||||||
|
for proof in response {
|
||||||
|
stream.append_raw(&proof, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Receive a response for header proofs
|
// Receive a response for header proofs
|
||||||
@ -428,8 +736,18 @@ impl LightProtocol {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Receive a set of transactions to relay.
|
// Receive a set of transactions to relay.
|
||||||
fn relay_transactions(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> {
|
fn relay_transactions(&self, peer: &PeerId, data: UntrustedRlp) -> Result<(), Error> {
|
||||||
unimplemented!()
|
const MAX_TRANSACTIONS: usize = 256;
|
||||||
|
|
||||||
|
let txs: Vec<_> = try!(data.iter().take(MAX_TRANSACTIONS).map(|x| x.as_val::<SignedTransaction>()).collect());
|
||||||
|
|
||||||
|
debug!(target: "les", "Received {} transactions to relay from peer {}", txs.len(), peer);
|
||||||
|
|
||||||
|
for handler in &self.handlers {
|
||||||
|
handler.on_transactions(*peer, &txs);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -464,7 +782,7 @@ impl NetworkProtocolHandler for LightProtocol {
|
|||||||
packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp),
|
packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp),
|
||||||
packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp),
|
packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp),
|
||||||
|
|
||||||
packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp),
|
packet::SEND_TRANSACTIONS => self.relay_transactions(peer, rlp),
|
||||||
|
|
||||||
other => {
|
other => {
|
||||||
Err(Error::UnrecognizedPacket(other))
|
Err(Error::UnrecognizedPacket(other))
|
||||||
@ -504,3 +822,85 @@ impl NetworkProtocolHandler for LightProtocol {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Helper for encoding the request to RLP with the given ID.
|
||||||
|
fn encode_request(req: &Request, req_id: usize) -> Vec<u8> {
|
||||||
|
match *req {
|
||||||
|
Request::Headers(ref headers) => {
|
||||||
|
let mut stream = RlpStream::new_list(5);
|
||||||
|
stream
|
||||||
|
.append(&req_id)
|
||||||
|
.begin_list(2)
|
||||||
|
.append(&headers.block_num)
|
||||||
|
.append(&headers.block_hash)
|
||||||
|
.append(&headers.max)
|
||||||
|
.append(&headers.skip)
|
||||||
|
.append(&headers.reverse);
|
||||||
|
stream.out()
|
||||||
|
}
|
||||||
|
Request::Bodies(ref request) => {
|
||||||
|
let mut stream = RlpStream::new_list(request.block_hashes.len() + 1);
|
||||||
|
stream.append(&req_id);
|
||||||
|
|
||||||
|
for hash in &request.block_hashes {
|
||||||
|
stream.append(hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}
|
||||||
|
Request::Receipts(ref request) => {
|
||||||
|
let mut stream = RlpStream::new_list(request.block_hashes.len() + 1);
|
||||||
|
stream.append(&req_id);
|
||||||
|
|
||||||
|
for hash in &request.block_hashes {
|
||||||
|
stream.append(hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}
|
||||||
|
Request::StateProofs(ref request) => {
|
||||||
|
let mut stream = RlpStream::new_list(request.requests.len() + 1);
|
||||||
|
stream.append(&req_id);
|
||||||
|
|
||||||
|
for proof_req in &request.requests {
|
||||||
|
stream.begin_list(4)
|
||||||
|
.append(&proof_req.block)
|
||||||
|
.append(&proof_req.key1);
|
||||||
|
|
||||||
|
match proof_req.key2 {
|
||||||
|
Some(ref key2) => stream.append(key2),
|
||||||
|
None => stream.append_empty_data(),
|
||||||
|
};
|
||||||
|
|
||||||
|
stream.append(&proof_req.from_level);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}
|
||||||
|
Request::Codes(ref request) => {
|
||||||
|
let mut stream = RlpStream::new_list(request.code_requests.len() + 1);
|
||||||
|
stream.append(&req_id);
|
||||||
|
|
||||||
|
for code_req in &request.code_requests {
|
||||||
|
stream.begin_list(2)
|
||||||
|
.append(&code_req.block_hash)
|
||||||
|
.append(&code_req.account_key);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}
|
||||||
|
Request::HeaderProofs(ref request) => {
|
||||||
|
let mut stream = RlpStream::new_list(request.requests.len() + 1);
|
||||||
|
stream.append(&req_id);
|
||||||
|
|
||||||
|
for proof_req in &request.requests {
|
||||||
|
stream.begin_list(3)
|
||||||
|
.append(&proof_req.cht_number)
|
||||||
|
.append(&proof_req.block_number)
|
||||||
|
.append(&proof_req.from_level);
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.out()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
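For reference, the header-request branch of `encode_request` produces a five-item list whose second item is itself a two-item list, `[req_id, [block_num, block_hash], max, skip, reverse]`, which is what `get_block_headers` reads back positionally with `val_at`. A sketch of that encoding written against the in-tree `rlp` crate used throughout this diff (the values are made up, and the hash is a placeholder byte vector rather than a real `H256`):

// Sketch of the wire shape for a Headers request.
extern crate rlp;

use rlp::{RlpStream, Stream};

fn main() {
    let req_id = 1usize;
    let (block_num, block_hash) = (2_000_000u64, vec![0u8; 32]); // placeholder hash bytes
    let (max, skip, reverse) = (32usize, 0u64, false);

    // [ req_id, [ block_num, block_hash ], max, skip, reverse ]
    let mut stream = RlpStream::new_list(5);
    stream
        .append(&req_id)
        .begin_list(2)
            .append(&block_num)
            .append(&block_hash)
        .append(&max)
        .append(&skip)
        .append(&reverse);

    let packet = stream.out();
    // get_block_headers above decodes this with data.val_at(0) for the id,
    // data.at(1) for the (number, hash) pair, then val_at(2..4) for the rest.
    assert!(!packet.is_empty());
}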
@ -183,8 +183,10 @@ pub struct Capabilities {
|
|||||||
/// Whether this peer can serve headers
|
/// Whether this peer can serve headers
|
||||||
pub serve_headers: bool,
|
pub serve_headers: bool,
|
||||||
/// Earliest block number it can serve block/receipt requests for.
|
/// Earliest block number it can serve block/receipt requests for.
|
||||||
|
/// `None` means no requests will be servable.
|
||||||
pub serve_chain_since: Option<u64>,
|
pub serve_chain_since: Option<u64>,
|
||||||
/// Earliest block number it can serve state requests for.
|
/// Earliest block number it can serve state requests for.
|
||||||
|
/// `None` means no requests will be servable.
|
||||||
pub serve_state_since: Option<u64>,
|
pub serve_state_since: Option<u64>,
|
||||||
/// Whether it can relay transactions to the eth network.
|
/// Whether it can relay transactions to the eth network.
|
||||||
pub tx_relay: bool,
|
pub tx_relay: bool,
|
||||||
@ -201,6 +203,16 @@ impl Default for Capabilities {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Capabilities {
|
||||||
|
/// Update the capabilities from an announcement.
|
||||||
|
pub fn update_from(&mut self, announcement: &Announcement) {
|
||||||
|
self.serve_headers = self.serve_headers || announcement.serve_headers;
|
||||||
|
self.serve_state_since = self.serve_state_since.or(announcement.serve_state_since);
|
||||||
|
self.serve_chain_since = self.serve_chain_since.or(announcement.serve_chain_since);
|
||||||
|
self.tx_relay = self.tx_relay || announcement.tx_relay;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
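The new `Capabilities::update_from` only ever widens what a peer advertises: booleans may flip on but never off, and the `*_since` block numbers are filled in only when they were previously `None`, because `Option::or` keeps the left-hand value. A sketch of that rule with a trimmed stand-in struct:

// Sketch of the monotonic widening rule applied by update_from.
#[derive(Debug, PartialEq, Clone, Copy)]
struct Caps {
    serve_headers: bool,
    serve_chain_since: Option<u64>,
    tx_relay: bool,
}

fn update(current: Caps, announced: Caps) -> Caps {
    Caps {
        serve_headers: current.serve_headers || announced.serve_headers,
        serve_chain_since: current.serve_chain_since.or(announced.serve_chain_since),
        tx_relay: current.tx_relay || announced.tx_relay,
    }
}

fn main() {
    let before = Caps { serve_headers: true, serve_chain_since: None, tx_relay: false };
    let announcement = Caps { serve_headers: false, serve_chain_since: Some(1_000_000), tx_relay: true };
    let after = update(before, announcement);
    assert_eq!(after, Caps { serve_headers: true, serve_chain_since: Some(1_000_000), tx_relay: true });
}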
/// Attempt to parse a handshake message into its three parts:
|
/// Attempt to parse a handshake message into its three parts:
|
||||||
/// - chain status
|
/// - chain status
|
||||||
/// - serving capabilities
|
/// - serving capabilities
|
||||||
|
@ -17,8 +17,11 @@
|
|||||||
//! A provider for the LES protocol. This is typically a full node, who can
|
//! A provider for the LES protocol. This is typically a full node, who can
|
||||||
//! give as much data as necessary to its peers.
|
//! give as much data as necessary to its peers.
|
||||||
|
|
||||||
use ethcore::transaction::SignedTransaction;
|
|
||||||
use ethcore::blockchain_info::BlockChainInfo;
|
use ethcore::blockchain_info::BlockChainInfo;
|
||||||
|
use ethcore::client::{BlockChainClient, ProvingBlockChainClient};
|
||||||
|
use ethcore::transaction::SignedTransaction;
|
||||||
|
use ethcore::ids::BlockID;
|
||||||
|
|
||||||
use util::{Bytes, H256};
|
use util::{Bytes, H256};
|
||||||
|
|
||||||
use request;
|
use request;
|
||||||
@ -26,7 +29,8 @@ use request;
|
|||||||
/// Defines the operations that a provider for `LES` must fulfill.
|
/// Defines the operations that a provider for `LES` must fulfill.
|
||||||
///
|
///
|
||||||
/// These are defined at [1], but may be subject to change.
|
/// These are defined at [1], but may be subject to change.
|
||||||
/// Requests which can't be fulfilled should return an empty RLP list.
|
/// Requests which can't be fulfilled should return either an empty RLP list
|
||||||
|
/// or empty vector where appropriate.
|
||||||
///
|
///
|
||||||
/// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES)
|
/// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES)
|
||||||
pub trait Provider: Send + Sync {
|
pub trait Provider: Send + Sync {
|
||||||
@ -34,9 +38,12 @@ pub trait Provider: Send + Sync {
|
|||||||
fn chain_info(&self) -> BlockChainInfo;
|
fn chain_info(&self) -> BlockChainInfo;
|
||||||
|
|
||||||
/// Find the depth of a common ancestor between two blocks.
|
/// Find the depth of a common ancestor between two blocks.
|
||||||
|
/// If either block is unknown or an ancestor can't be found
|
||||||
|
/// then return `None`.
|
||||||
fn reorg_depth(&self, a: &H256, b: &H256) -> Option<u64>;
|
fn reorg_depth(&self, a: &H256, b: &H256) -> Option<u64>;
|
||||||
|
|
||||||
/// Earliest state.
|
/// Earliest block where state queries are available.
|
||||||
|
/// If `None`, no state queries are servable.
|
||||||
fn earliest_state(&self) -> Option<u64>;
|
fn earliest_state(&self) -> Option<u64>;
|
||||||
|
|
||||||
/// Provide a list of headers starting at the requested block,
|
/// Provide a list of headers starting at the requested block,
|
||||||
@ -57,11 +64,12 @@ pub trait Provider: Send + Sync {
|
|||||||
/// Provide a set of merkle proofs, as requested. Each request is a
|
/// Provide a set of merkle proofs, as requested. Each request is a
|
||||||
/// block hash and request parameters.
|
/// block hash and request parameters.
|
||||||
///
|
///
|
||||||
/// Returns a vector to RLP-encoded lists satisfying the requests.
|
/// Returns a vector of RLP-encoded lists satisfying the requests.
|
||||||
fn proofs(&self, req: request::StateProofs) -> Vec<Bytes>;
|
fn proofs(&self, req: request::StateProofs) -> Vec<Bytes>;
|
||||||
|
|
||||||
/// Provide contract code for the specified (block_hash, account_hash) pairs.
|
/// Provide contract code for the specified (block_hash, account_hash) pairs.
|
||||||
fn code(&self, req: request::ContractCodes) -> Vec<Bytes>;
|
/// Each item in the resulting vector is either the raw bytecode or empty.
|
||||||
|
fn contract_code(&self, req: request::ContractCodes) -> Vec<Bytes>;
|
||||||
|
|
||||||
/// Provide header proofs from the Canonical Hash Tries.
|
/// Provide header proofs from the Canonical Hash Tries.
|
||||||
fn header_proofs(&self, req: request::HeaderProofs) -> Vec<Bytes>;
    fn header_proofs(&self, req: request::HeaderProofs) -> Vec<Bytes>;
@ -69,3 +77,92 @@ pub trait Provider: Send + Sync {
    /// Provide pending transactions.
    fn pending_transactions(&self) -> Vec<SignedTransaction>;
}

// Implementation of a light client data provider for a client.
impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
    fn chain_info(&self) -> BlockChainInfo {
        BlockChainClient::chain_info(self)
    }

    fn reorg_depth(&self, a: &H256, b: &H256) -> Option<u64> {
        self.tree_route(a, b).map(|route| route.index as u64)
    }

    fn earliest_state(&self) -> Option<u64> {
        Some(self.pruning_info().earliest_state)
    }

    fn block_headers(&self, req: request::Headers) -> Vec<Bytes> {
        let best_num = self.chain_info().best_block_number;
        let start_num = req.block_num;

        match self.block_hash(BlockID::Number(req.block_num)) {
            Some(hash) if hash == req.block_hash => {}
            _ => {
                trace!(target: "les_provider", "unknown/non-canonical start block in header request: {:?}", (req.block_num, req.block_hash));
                return vec![]
            }
        }

        (0u64..req.max as u64)
            .map(|x: u64| x.saturating_mul(req.skip))
            .take_while(|x| if req.reverse { x < &start_num } else { best_num - start_num < *x })
            .map(|x| if req.reverse { start_num - x } else { start_num + x })
            .map(|x| self.block_header(BlockID::Number(x)))
            .take_while(|x| x.is_some())
            .flat_map(|x| x)
            .collect()
    }

    fn block_bodies(&self, req: request::Bodies) -> Vec<Bytes> {
        req.block_hashes.into_iter()
            .map(|hash| self.block_body(BlockID::Hash(hash)))
            .map(|body| body.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec()))
            .collect()
    }

    fn receipts(&self, req: request::Receipts) -> Vec<Bytes> {
        req.block_hashes.into_iter()
            .map(|hash| self.block_receipts(&hash))
            .map(|receipts| receipts.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec()))
            .collect()
    }

    fn proofs(&self, req: request::StateProofs) -> Vec<Bytes> {
        use rlp::{RlpStream, Stream};

        let mut results = Vec::with_capacity(req.requests.len());

        for request in req.requests {
            let proof = match request.key2 {
                Some(key2) => self.prove_storage(request.key1, key2, request.from_level, BlockID::Hash(request.block)),
                None => self.prove_account(request.key1, request.from_level, BlockID::Hash(request.block)),
            };

            let mut stream = RlpStream::new_list(proof.len());
            for node in proof {
                stream.append_raw(&node, 1);
            }

            results.push(stream.out());
        }

        results
    }

    fn contract_code(&self, req: request::ContractCodes) -> Vec<Bytes> {
        req.code_requests.into_iter()
            .map(|req| {
                self.code_by_hash(req.account_key, BlockID::Hash(req.block_hash))
            })
            .collect()
    }

    fn header_proofs(&self, req: request::HeaderProofs) -> Vec<Bytes> {
        req.requests.into_iter().map(|_| ::rlp::EMPTY_LIST_RLP.to_vec()).collect()
    }

    fn pending_transactions(&self) -> Vec<SignedTransaction> {
        BlockChainClient::pending_transactions(self)
    }
}
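Illustrative note (not part of the diff): the number-selection logic of `block_headers` above can be read as a small standalone function. This sketch mirrors the iterator chain's predicates over plain integers; the only liberty taken is `saturating_sub`, added here purely to keep the sketch panic-free.

// Sketch only: which block numbers a Headers request selects.
fn selected_numbers(start_num: u64, best_num: u64, max: usize, skip: u64, reverse: bool) -> Vec<u64> {
    (0u64..max as u64)
        .map(|x| x.saturating_mul(skip))
        .take_while(|x| if reverse { *x < start_num } else { best_num.saturating_sub(start_num) < *x })
        .map(|x| if reverse { start_num - x } else { start_num + x })
        .collect()
}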
@ -16,25 +16,26 @@

//! LES request types.

// TODO: make IPC compatible.

use util::H256;

/// A request for block headers.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct Headers {
    /// Block information for the request being made.
    /// Starting block number
    pub block: (u64, H256),
    pub block_num: u64,
    /// Starting block hash. This and number could be combined but IPC codegen is
    /// not robust enough to support it.
    pub block_hash: H256,
    /// The maximum amount of headers which can be returned.
    pub max: usize,
    /// The amount of headers to skip between each response entry.
    pub skip: usize,
    pub skip: u64,
    /// Whether the headers should proceed in falling number from the initial block.
    pub reverse: bool,
}

/// A request for specific block bodies.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct Bodies {
    /// Hashes which bodies are being requested for.
    pub block_hashes: Vec<H256>
@ -44,14 +45,14 @@ pub struct Bodies {
///
/// This request is answered with a list of transaction receipts for each block
/// requested.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct Receipts {
    /// Block hashes to return receipts for.
    pub block_hashes: Vec<H256>,
}

/// A request for a state proof
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct StateProof {
    /// Block hash to query state from.
    pub block: H256,
@ -65,21 +66,30 @@ pub struct StateProof {
}

/// A request for state proofs.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct StateProofs {
    /// All the proof requests.
    pub requests: Vec<StateProof>,
}

/// A request for contract code.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct ContractCode {
    /// Block hash
    pub block_hash: H256,
    /// Account key (== sha3(address))
    pub account_key: H256,
}

/// A request for contract code.
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct ContractCodes {
    /// Block hash and account key (== sha3(address)) pairs to fetch code for.
    pub code_requests: Vec<(H256, H256)>,
    pub code_requests: Vec<ContractCode>,
}

/// A request for a header proof from the Canonical Hash Trie.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct HeaderProof {
    /// Number of the CHT.
    pub cht_number: u64,
@ -90,14 +100,14 @@ pub struct HeaderProof {
}

/// A request for header proofs from the CHT.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub struct HeaderProofs {
    /// All the proof requests.
    pub requests: Vec<HeaderProofs>,
    pub requests: Vec<HeaderProof>,
}

/// Kinds of requests.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Binary)]
pub enum Kind {
    /// Requesting headers.
    Headers,
@ -114,7 +124,7 @@ pub enum Kind {
}

/// Encompasses all possible types of requests in a single structure.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, Binary)]
pub enum Request {
    /// Requesting headers.
    Headers(Headers),
@ -142,4 +152,16 @@ impl Request {
            Request::HeaderProofs(_) => Kind::HeaderProofs,
        }
    }

    /// Get the amount of requests being made.
    pub fn amount(&self) -> usize {
        match *self {
            Request::Headers(ref req) => req.max,
            Request::Bodies(ref req) => req.block_hashes.len(),
            Request::Receipts(ref req) => req.block_hashes.len(),
            Request::StateProofs(ref req) => req.requests.len(),
            Request::Codes(ref req) => req.code_requests.len(),
            Request::HeaderProofs(ref req) => req.requests.len(),
        }
    }
}
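Usage note (illustrative, not from the diff): a `Request` wraps one of the structs above, and `amount` reports how many individual items it asks for. `H256::default()` below is just a placeholder for a real starting hash.

let req = Request::Headers(Headers {
    block_num: 1_000_000,
    block_hash: H256::default(),
    max: 64,
    skip: 0,
    reverse: false,
});
assert_eq!(req.amount(), 64);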
@ -14,8 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

import { PropTypes } from 'react';

export default function (type) {
    return PropTypes.oneOfType([ PropTypes.oneOf([ null ]), type ]);
}

//! Types used in the public (IPC) api which require custom code generation.

#![allow(dead_code, unused_assignments, unused_variables)] // codegen issues
include!(concat!(env!("OUT_DIR"), "/mod.rs.in"));
17
ethcore/light/src/types/mod.rs.in
Normal file
@ -0,0 +1,17 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

pub mod les_request;
@ -21,8 +21,7 @@
"genesis": {
    "seal": {
        "generic": {
            "fields": 2,
            "rlp": "0x200"
            "rlp": "0xc28080"
        }
    },
    "difficulty": "0x20000",
@ -12,7 +12,6 @@
"genesis": {
    "seal": {
        "generic": {
            "fields": 0,
            "rlp": "0x0"
        }
    },
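Reference note (not part of the diff): "0xc28080" is simply the RLP encoding of a two-item list of empty strings, i.e. placeholders for the two AuthorityRound seal fields (step and signature). A minimal sketch with the in-tree rlp crate, assuming the RlpStream API already used elsewhere in this diff:

use rlp::{RlpStream, Stream};

// 0xc2 = list header for 2 bytes of payload, 0x80 = empty string, twice.
let mut seal = RlpStream::new_list(2);
seal.append_empty_data();
seal.append_empty_data();
assert_eq!(seal.out(), vec![0xc2, 0x80, 0x80]);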
@ -16,9 +16,12 @@

//! Account management.

use std::{fs, fmt};
mod stores;

use self::stores::{AddressBook, DappsSettingsStore};

use std::fmt;
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{Instant, Duration};
use util::{Mutex, RwLock, Itertools};
use ethstore::{SimpleSecretStore, SecretStore, Error as SSError, SafeAccount, EthStore, EthMultiStore, random_string};
@ -106,77 +109,8 @@ impl KeyDirectory for NullDir {
    }
}

/// Disk-backed map from Address to String. Uses JSON.
/// Dapp identifier
struct AddressBook {
pub type DappId = String;
    path: PathBuf,
    cache: HashMap<Address, AccountMeta>,
    transient: bool,
}

impl AddressBook {
    pub fn new(path: String) -> Self {
        trace!(target: "addressbook", "new({})", path);
        let mut path: PathBuf = path.into();
        path.push("address_book.json");
        trace!(target: "addressbook", "path={:?}", path);
        let mut r = AddressBook {
            path: path,
            cache: HashMap::new(),
            transient: false,
        };
        r.revert();
        r
    }

    pub fn transient() -> Self {
        let mut book = AddressBook::new(Default::default());
        book.transient = true;
        book
    }

    pub fn get(&self) -> HashMap<Address, AccountMeta> {
        self.cache.clone()
    }

    pub fn set_name(&mut self, a: Address, name: String) {
        let mut x = self.cache.get(&a)
            .cloned()
            .unwrap_or_else(|| AccountMeta {name: Default::default(), meta: "{}".to_owned(), uuid: None});
        x.name = name;
        self.cache.insert(a, x);
        self.save();
    }

    pub fn set_meta(&mut self, a: Address, meta: String) {
        let mut x = self.cache.get(&a)
            .cloned()
            .unwrap_or_else(|| AccountMeta {name: "Anonymous".to_owned(), meta: Default::default(), uuid: None});
        x.meta = meta;
        self.cache.insert(a, x);
        self.save();
    }

    fn revert(&mut self) {
        if self.transient { return; }
        trace!(target: "addressbook", "revert");
        let _ = fs::File::open(self.path.clone())
            .map_err(|e| trace!(target: "addressbook", "Couldn't open address book: {}", e))
            .and_then(|f| AccountMeta::read_address_map(&f)
                .map_err(|e| warn!(target: "addressbook", "Couldn't read address book: {}", e))
                .and_then(|m| { self.cache = m; Ok(()) })
            );
    }

    fn save(&mut self) {
        if self.transient { return; }
        trace!(target: "addressbook", "save");
        let _ = fs::File::create(self.path.clone())
            .map_err(|e| warn!(target: "addressbook", "Couldn't open address book for writing: {}", e))
            .and_then(|mut f| AccountMeta::write_address_map(&self.cache, &mut f)
                .map_err(|e| warn!(target: "addressbook", "Couldn't write to address book: {}", e))
            );
    }
}

fn transient_sstore() -> EthMultiStore {
    EthMultiStore::open(Box::new(NullDir::default())).expect("NullDir load always succeeds; qed")
@ -189,6 +123,7 @@ type AccountToken = String;
pub struct AccountProvider {
    address_book: Mutex<AddressBook>,
    unlocked: Mutex<HashMap<Address, AccountData>>,
    dapps_settings: RwLock<DappsSettingsStore>,
    /// Accounts on disk
    sstore: Box<SecretStore>,
    /// Accounts unlocked with rolling tokens
@ -200,7 +135,8 @@ impl AccountProvider {
    pub fn new(sstore: Box<SecretStore>) -> Self {
        AccountProvider {
            unlocked: Mutex::new(HashMap::new()),
            address_book: Mutex::new(AddressBook::new(sstore.local_path().into())),
            address_book: RwLock::new(AddressBook::new(sstore.local_path().into())),
            dapps_settings: RwLock::new(DappsSettingsStore::new(sstore.local_path().into())),
            sstore: sstore,
            transient_sstore: transient_sstore(),
        }
@ -209,8 +145,9 @@ impl AccountProvider {
    /// Creates not disk backed provider.
    pub fn transient_provider() -> Self {
        AccountProvider {
            unlocked: Mutex::new(HashMap::new()),
            address_book: Mutex::new(AddressBook::transient()),
            unlocked: Mutex::new(HashMap::new()),
            dapps_settings: RwLock::new(DappsSettingsStore::transient()),
            sstore: Box::new(EthStore::open(Box::new(NullDir::default())).expect("NullDir load always succeeds; qed")),
            transient_sstore: transient_sstore(),
        }
@ -255,19 +192,36 @@ impl AccountProvider {
        Ok(accounts)
    }

    /// Gets addresses visible for dapp.
    pub fn dapps_addresses(&self, dapp: DappId) -> Result<Vec<Address>, Error> {
        let accounts = self.dapps_settings.read().get();
        Ok(accounts.get(&dapp).map(|settings| settings.accounts.clone()).unwrap_or_else(Vec::new))
    }

    /// Sets addresses visible for dapp.
    pub fn set_dapps_addresses(&self, dapp: DappId, addresses: Vec<Address>) -> Result<(), Error> {
        self.dapps_settings.write().set_accounts(dapp, addresses);
        Ok(())
    }

    /// Returns each address along with metadata.
    pub fn addresses_info(&self) -> Result<HashMap<Address, AccountMeta>, Error> {
        Ok(self.address_book.lock().get())
        Ok(self.address_book.read().get())
    }

    /// Returns each address along with metadata.
    pub fn set_address_name(&self, account: Address, name: String) -> Result<(), Error> {
        Ok(self.address_book.lock().set_name(account, name))
        Ok(self.address_book.write().set_name(account, name))
    }

    /// Returns each address along with metadata.
    pub fn set_address_meta(&self, account: Address, meta: String) -> Result<(), Error> {
        Ok(self.address_book.lock().set_meta(account, meta))
        Ok(self.address_book.write().set_meta(account, meta))
    }

    /// Removes an address from the address book.
    pub fn remove_address(&self, addr: Address) -> Result<(), Error> {
        Ok(self.address_book.write().remove(addr))
    }

    /// Returns each account along with name and meta.
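Illustrative sketch of the locking change above (the address book moves from util::Mutex to util::RwLock): reads take a shared lock and writes an exclusive one, so concurrent metadata reads no longer serialize. This assumes the parking_lot-style guards used elsewhere in this diff, where read()/write() return guards directly.

let book = RwLock::new(AddressBook::transient());

{
    let _all = book.read().get();              // shared access for readers
}
book.write().set_name(1.into(), "One".into()); // exclusive access for writers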
@ -443,23 +397,9 @@ impl AccountProvider {

#[cfg(test)]
mod tests {
    use super::{AccountProvider, AddressBook, Unlock};
    use super::{AccountProvider, Unlock};
    use std::collections::HashMap;
    use std::time::Instant;
    use ethjson::misc::AccountMeta;
    use ethstore::ethkey::{Generator, Random};
    use devtools::RandomTempPath;

    #[test]
    fn should_save_and_reload_address_book() {
        let temp = RandomTempPath::create_dir();
        let path = temp.as_str().to_owned();
        let mut b = AddressBook::new(path.clone());
        b.set_name(1.into(), "One".to_owned());
        b.set_meta(1.into(), "{1:1}".to_owned());
        let b = AddressBook::new(path);
        assert_eq!(b.get(), hash_map![1.into() => AccountMeta{name: "One".to_owned(), meta: "{1:1}".to_owned(), uuid: None}]);
    }

    #[test]
    fn unlock_account_temp() {
@ -513,4 +453,16 @@ mod tests {
            .expect("First usage of token should be correct.");
        assert!(ap.sign_with_token(kp.address(), token, Default::default()).is_err(), "Second usage of the same token should fail.");
    }

    #[test]
    fn should_set_dapps_addresses() {
        // given
        let ap = AccountProvider::transient_provider();
        let app = "app1".to_owned();

        // when
        ap.set_dapps_addresses(app.clone(), vec![1.into(), 2.into()]).unwrap();

        // then
        assert_eq!(ap.dapps_addresses(app.clone()).unwrap(), vec![1.into(), 2.into()]);
    }
}
271
ethcore/src/account_provider/stores.rs
Normal file
271
ethcore/src/account_provider/stores.rs
Normal file
@ -0,0 +1,271 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//! Address Book and Dapps Settings Store
|
||||||
|
|
||||||
|
use std::{fs, fmt, hash, ops};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use ethstore::ethkey::Address;
|
||||||
|
use ethjson::misc::{AccountMeta, DappsSettings as JsonSettings};
|
||||||
|
use account_provider::DappId;
|
||||||
|
|
||||||
|
/// Disk-backed map from Address to String. Uses JSON.
|
||||||
|
pub struct AddressBook {
|
||||||
|
cache: DiskMap<Address, AccountMeta>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AddressBook {
|
||||||
|
/// Creates new address book at given directory.
|
||||||
|
pub fn new(path: String) -> Self {
|
||||||
|
let mut r = AddressBook {
|
||||||
|
cache: DiskMap::new(path, "address_book.json".into())
|
||||||
|
};
|
||||||
|
r.cache.revert(AccountMeta::read_address_map);
|
||||||
|
r
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates transient address book (no changes are saved to disk).
|
||||||
|
pub fn transient() -> Self {
|
||||||
|
AddressBook {
|
||||||
|
cache: DiskMap::transient()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the address book.
|
||||||
|
pub fn get(&self) -> HashMap<Address, AccountMeta> {
|
||||||
|
self.cache.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn save(&self) {
|
||||||
|
self.cache.save(AccountMeta::write_address_map)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets new name for given address.
|
||||||
|
pub fn set_name(&mut self, a: Address, name: String) {
|
||||||
|
{
|
||||||
|
let mut x = self.cache.entry(a)
|
||||||
|
.or_insert_with(|| AccountMeta {name: Default::default(), meta: "{}".to_owned(), uuid: None});
|
||||||
|
x.name = name;
|
||||||
|
}
|
||||||
|
self.save();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets new meta for given address.
|
||||||
|
pub fn set_meta(&mut self, a: Address, meta: String) {
|
||||||
|
{
|
||||||
|
let mut x = self.cache.entry(a)
|
||||||
|
.or_insert_with(|| AccountMeta {name: "Anonymous".to_owned(), meta: Default::default(), uuid: None});
|
||||||
|
x.meta = meta;
|
||||||
|
}
|
||||||
|
self.save();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes an entry
|
||||||
|
pub fn remove(&mut self, a: Address) {
|
||||||
|
self.cache.remove(&a);
|
||||||
|
self.save();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Dapps user settings
|
||||||
|
#[derive(Debug, Default, Clone, Eq, PartialEq)]
|
||||||
|
pub struct DappsSettings {
|
||||||
|
/// A list of visible accounts
|
||||||
|
pub accounts: Vec<Address>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<JsonSettings> for DappsSettings {
|
||||||
|
fn from(s: JsonSettings) -> Self {
|
||||||
|
DappsSettings {
|
||||||
|
accounts: s.accounts.into_iter().map(Into::into).collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<DappsSettings> for JsonSettings {
|
||||||
|
fn from(s: DappsSettings) -> Self {
|
||||||
|
JsonSettings {
|
||||||
|
accounts: s.accounts.into_iter().map(Into::into).collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disk-backed map from DappId to Settings. Uses JSON.
|
||||||
|
pub struct DappsSettingsStore {
|
||||||
|
cache: DiskMap<DappId, DappsSettings>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DappsSettingsStore {
|
||||||
|
/// Creates new store at given directory path.
|
||||||
|
pub fn new(path: String) -> Self {
|
||||||
|
let mut r = DappsSettingsStore {
|
||||||
|
cache: DiskMap::new(path, "dapps_accounts.json".into())
|
||||||
|
};
|
||||||
|
r.cache.revert(JsonSettings::read_dapps_settings);
|
||||||
|
r
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates transient store (no changes are saved to disk).
|
||||||
|
pub fn transient() -> Self {
|
||||||
|
DappsSettingsStore {
|
||||||
|
cache: DiskMap::transient()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get copy of the dapps settings
|
||||||
|
pub fn get(&self) -> HashMap<DappId, DappsSettings> {
|
||||||
|
self.cache.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn save(&self) {
|
||||||
|
self.cache.save(JsonSettings::write_dapps_settings)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn set_accounts(&mut self, id: DappId, accounts: Vec<Address>) {
|
||||||
|
{
|
||||||
|
let mut settings = self.cache.entry(id).or_insert_with(DappsSettings::default);
|
||||||
|
settings.accounts = accounts;
|
||||||
|
}
|
||||||
|
self.save();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disk-serializable HashMap
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct DiskMap<K: hash::Hash + Eq, V> {
|
||||||
|
path: PathBuf,
|
||||||
|
cache: HashMap<K, V>,
|
||||||
|
transient: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<K: hash::Hash + Eq, V> ops::Deref for DiskMap<K, V> {
|
||||||
|
type Target = HashMap<K, V>;
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.cache
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<K: hash::Hash + Eq, V> ops::DerefMut for DiskMap<K, V> {
|
||||||
|
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||||
|
&mut self.cache
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<K: hash::Hash + Eq, V> DiskMap<K, V> {
|
||||||
|
pub fn new(path: String, file_name: String) -> Self {
|
||||||
|
trace!(target: "diskmap", "new({})", path);
|
||||||
|
let mut path: PathBuf = path.into();
|
||||||
|
path.push(file_name);
|
||||||
|
trace!(target: "diskmap", "path={:?}", path);
|
||||||
|
DiskMap {
|
||||||
|
path: path,
|
||||||
|
cache: HashMap::new(),
|
||||||
|
transient: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn transient() -> Self {
|
||||||
|
let mut map = DiskMap::new(Default::default(), "diskmap.json".into());
|
||||||
|
map.transient = true;
|
||||||
|
map
|
||||||
|
}
|
||||||
|
|
||||||
|
fn revert<F, E>(&mut self, read: F) where
|
||||||
|
F: Fn(fs::File) -> Result<HashMap<K, V>, E>,
|
||||||
|
E: fmt::Display,
|
||||||
|
{
|
||||||
|
if self.transient { return; }
|
||||||
|
trace!(target: "diskmap", "revert {:?}", self.path);
|
||||||
|
let _ = fs::File::open(self.path.clone())
|
||||||
|
.map_err(|e| trace!(target: "diskmap", "Couldn't open disk map: {}", e))
|
||||||
|
.and_then(|f| read(f).map_err(|e| warn!(target: "diskmap", "Couldn't read disk map: {}", e)))
|
||||||
|
.and_then(|m| {
|
||||||
|
self.cache = m;
|
||||||
|
Ok(())
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn save<F, E>(&self, write: F) where
|
||||||
|
F: Fn(&HashMap<K, V>, &mut fs::File) -> Result<(), E>,
|
||||||
|
E: fmt::Display,
|
||||||
|
{
|
||||||
|
if self.transient { return; }
|
||||||
|
trace!(target: "diskmap", "save {:?}", self.path);
|
||||||
|
let _ = fs::File::create(self.path.clone())
|
||||||
|
.map_err(|e| warn!(target: "diskmap", "Couldn't open disk map for writing: {}", e))
|
||||||
|
.and_then(|mut f| {
|
||||||
|
write(&self.cache, &mut f).map_err(|e| warn!(target: "diskmap", "Couldn't write to disk map: {}", e))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::{AddressBook, DappsSettingsStore, DappsSettings};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use ethjson::misc::AccountMeta;
|
||||||
|
use devtools::RandomTempPath;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_save_and_reload_address_book() {
|
||||||
|
let temp = RandomTempPath::create_dir();
|
||||||
|
let path = temp.as_str().to_owned();
|
||||||
|
let mut b = AddressBook::new(path.clone());
|
||||||
|
b.set_name(1.into(), "One".to_owned());
|
||||||
|
b.set_meta(1.into(), "{1:1}".to_owned());
|
||||||
|
let b = AddressBook::new(path);
|
||||||
|
assert_eq!(b.get(), hash_map![1.into() => AccountMeta{name: "One".to_owned(), meta: "{1:1}".to_owned(), uuid: None}]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_save_and_reload_dapps_settings() {
|
||||||
|
// given
|
||||||
|
let temp = RandomTempPath::create_dir();
|
||||||
|
let path = temp.as_str().to_owned();
|
||||||
|
let mut b = DappsSettingsStore::new(path.clone());
|
||||||
|
|
||||||
|
// when
|
||||||
|
b.set_accounts("dappOne".into(), vec![1.into(), 2.into()]);
|
||||||
|
|
||||||
|
// then
|
||||||
|
let b = DappsSettingsStore::new(path);
|
||||||
|
assert_eq!(b.get(), hash_map![
|
||||||
|
"dappOne".into() => DappsSettings {
|
||||||
|
accounts: vec![1.into(), 2.into()],
|
||||||
|
}
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_remove_address() {
|
||||||
|
let temp = RandomTempPath::create_dir();
|
||||||
|
let path = temp.as_str().to_owned();
|
||||||
|
let mut b = AddressBook::new(path.clone());
|
||||||
|
|
||||||
|
b.set_name(1.into(), "One".to_owned());
|
||||||
|
b.set_name(2.into(), "Two".to_owned());
|
||||||
|
b.set_name(3.into(), "Three".to_owned());
|
||||||
|
b.remove(2.into());
|
||||||
|
|
||||||
|
let b = AddressBook::new(path);
|
||||||
|
assert_eq!(b.get(), hash_map![
|
||||||
|
1.into() => AccountMeta{name: "One".to_owned(), meta: "{}".to_owned(), uuid: None},
|
||||||
|
3.into() => AccountMeta{name: "Three".to_owned(), meta: "{}".to_owned(), uuid: None}
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
}
|
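The DiskMap introduced in stores.rs above leans on Deref/DerefMut so its owners (AddressBook, DappsSettingsStore) can use the full HashMap API (entry, remove, clone) while persistence stays in revert/save. A stripped-down sketch of that pattern, with hypothetical names, for reference only:

use std::collections::HashMap;
use std::hash::Hash;
use std::ops::{Deref, DerefMut};

// Hypothetical, minimal stand-in for DiskMap: map operations come for free via
// Deref; only persistence would need dedicated methods.
struct MemMap<K: Hash + Eq, V> {
    cache: HashMap<K, V>,
}

impl<K: Hash + Eq, V> Deref for MemMap<K, V> {
    type Target = HashMap<K, V>;
    fn deref(&self) -> &Self::Target { &self.cache }
}

impl<K: Hash + Eq, V> DerefMut for MemMap<K, V> {
    fn deref_mut(&mut self) -> &mut Self::Target { &mut self.cache }
}

fn demo() {
    let mut m = MemMap { cache: HashMap::new() };
    m.entry("dappOne").or_insert(0);
    m.remove("dappOne");
}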
@ -34,6 +34,7 @@ use blockchain::update::ExtrasUpdate;
|
|||||||
use blockchain::{CacheSize, ImportRoute, Config};
|
use blockchain::{CacheSize, ImportRoute, Config};
|
||||||
use db::{self, Writable, Readable, CacheUpdatePolicy};
|
use db::{self, Writable, Readable, CacheUpdatePolicy};
|
||||||
use cache_manager::CacheManager;
|
use cache_manager::CacheManager;
|
||||||
|
use engines::Engine;
|
||||||
|
|
||||||
const LOG_BLOOMS_LEVELS: usize = 3;
|
const LOG_BLOOMS_LEVELS: usize = 3;
|
||||||
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
|
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
|
||||||
@ -198,6 +199,9 @@ pub struct BlockChain {
|
|||||||
pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
|
pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
|
||||||
pending_block_details: RwLock<HashMap<H256, BlockDetails>>,
|
pending_block_details: RwLock<HashMap<H256, BlockDetails>>,
|
||||||
pending_transaction_addresses: RwLock<HashMap<H256, Option<TransactionAddress>>>,
|
pending_transaction_addresses: RwLock<HashMap<H256, Option<TransactionAddress>>>,
|
||||||
|
|
||||||
|
// Used for block ordering.
|
||||||
|
engine: Arc<Engine>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BlockProvider for BlockChain {
|
impl BlockProvider for BlockChain {
|
||||||
@ -415,9 +419,8 @@ impl<'a> Iterator for AncestryIter<'a> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl BlockChain {
|
impl BlockChain {
|
||||||
#[cfg_attr(feature="dev", allow(useless_let_if_seq))]
|
/// Create new instance of blockchain from given Genesis and block picking rules of Engine.
|
||||||
/// Create new instance of blockchain from given Genesis
|
pub fn new(config: Config, genesis: &[u8], db: Arc<Database>, engine: Arc<Engine>) -> BlockChain {
|
||||||
pub fn new(config: Config, genesis: &[u8], db: Arc<Database>) -> BlockChain {
|
|
||||||
// 400 is the average size of the key
|
||||||
let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);
|
let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);
|
||||||
|
|
||||||
@ -442,6 +445,7 @@ impl BlockChain {
|
|||||||
pending_block_hashes: RwLock::new(HashMap::new()),
|
pending_block_hashes: RwLock::new(HashMap::new()),
|
||||||
pending_block_details: RwLock::new(HashMap::new()),
|
pending_block_details: RwLock::new(HashMap::new()),
|
||||||
pending_transaction_addresses: RwLock::new(HashMap::new()),
|
pending_transaction_addresses: RwLock::new(HashMap::new()),
|
||||||
|
engine: engine,
|
||||||
};
|
};
|
||||||
|
|
||||||
// load best block
|
// load best block
|
||||||
@ -858,13 +862,12 @@ impl BlockChain {
|
|||||||
let number = header.number();
|
let number = header.number();
|
||||||
let parent_hash = header.parent_hash();
|
let parent_hash = header.parent_hash();
|
||||||
let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
|
let parent_details = self.block_details(&parent_hash).unwrap_or_else(|| panic!("Invalid parent hash: {:?}", parent_hash));
|
||||||
let total_difficulty = parent_details.total_difficulty + header.difficulty();
|
let is_new_best = self.engine.is_new_best_block(self.best_block_total_difficulty(), HeaderView::new(&self.best_block_header()), &parent_details, header);
|
||||||
let is_new_best = total_difficulty > self.best_block_total_difficulty();
|
|
||||||
|
|
||||||
BlockInfo {
|
BlockInfo {
|
||||||
hash: hash,
|
hash: hash,
|
||||||
number: number,
|
number: number,
|
||||||
total_difficulty: total_difficulty,
|
total_difficulty: parent_details.total_difficulty + header.difficulty(),
|
||||||
location: if is_new_best {
|
location: if is_new_best {
|
||||||
// on new best block we need to make sure that all ancestors
|
// on new best block we need to make sure that all ancestors
|
||||||
// are moved to "canon chain"
|
// are moved to "canon chain"
|
||||||
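The change in the hunk above delegates the best-block decision to the Engine instead of comparing total difficulty inline. As a hedged sketch only (the real Engine::is_new_best_block signature and default behaviour may differ), a total-difficulty-based hook equivalent to the replaced inline check would look like:

// Sketch: equivalent of the old inline rule, expressed as an engine-style hook.
// U256, BlockDetails and Header are the types already used in this hunk.
fn is_new_best_block(best_total_difficulty: U256, parent_details: &BlockDetails, header: &Header) -> bool {
    parent_details.total_difficulty + *header.difficulty() > best_total_difficulty
}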
@ -1319,11 +1322,16 @@ mod tests {
|
|||||||
use views::BlockView;
|
use views::BlockView;
|
||||||
use transaction::{Transaction, Action};
|
use transaction::{Transaction, Action};
|
||||||
use log_entry::{LogEntry, LocalizedLogEntry};
|
use log_entry::{LogEntry, LocalizedLogEntry};
|
||||||
|
use spec::Spec;
|
||||||
|
|
||||||
fn new_db(path: &str) -> Arc<Database> {
|
fn new_db(path: &str) -> Arc<Database> {
|
||||||
Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap())
|
Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), path).unwrap())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn new_chain(genesis: &[u8], db: Arc<Database>) -> BlockChain {
|
||||||
|
BlockChain::new(Config::default(), genesis, db, Spec::new_null().engine)
|
||||||
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn should_cache_best_block() {
|
fn should_cache_best_block() {
|
||||||
// given
|
// given
|
||||||
@ -1334,7 +1342,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
assert_eq!(bc.best_block_number(), 0);
|
assert_eq!(bc.best_block_number(), 0);
|
||||||
|
|
||||||
// when
|
// when
|
||||||
@ -1360,7 +1368,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
assert_eq!(bc.genesis_hash(), genesis_hash.clone());
|
assert_eq!(bc.genesis_hash(), genesis_hash.clone());
|
||||||
assert_eq!(bc.best_block_hash(), genesis_hash.clone());
|
assert_eq!(bc.best_block_hash(), genesis_hash.clone());
|
||||||
@ -1391,7 +1399,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let mut block_hashes = vec![genesis_hash.clone()];
|
let mut block_hashes = vec![genesis_hash.clone()];
|
||||||
let mut batch = db.transaction();
|
let mut batch = db.transaction();
|
||||||
@ -1427,7 +1435,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let mut batch =db.transaction();
|
let mut batch =db.transaction();
|
||||||
for b in &[&b1a, &b1b, &b2a, &b2b, &b3a, &b3b, &b4a, &b4b, &b5a, &b5b] {
|
for b in &[&b1a, &b1b, &b2a, &b2b, &b3a, &b3b, &b4a, &b4b, &b5a, &b5b] {
|
||||||
@ -1489,7 +1497,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let mut batch = db.transaction();
|
let mut batch = db.transaction();
|
||||||
let _ = bc.insert_block(&mut batch, &b1a, vec![]);
|
let _ = bc.insert_block(&mut batch, &b1a, vec![]);
|
||||||
@ -1577,7 +1585,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let mut batch = db.transaction();
|
let mut batch = db.transaction();
|
||||||
let _ = bc.insert_block(&mut batch, &b1a, vec![]);
|
let _ = bc.insert_block(&mut batch, &b1a, vec![]);
|
||||||
@ -1639,7 +1647,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let mut batch = db.transaction();
|
let mut batch = db.transaction();
|
||||||
let ir1 = bc.insert_block(&mut batch, &b1, vec![]);
|
let ir1 = bc.insert_block(&mut batch, &b1, vec![]);
|
||||||
@ -1755,7 +1763,7 @@ mod tests {
|
|||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
{
|
{
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
assert_eq!(bc.best_block_hash(), genesis_hash);
|
assert_eq!(bc.best_block_hash(), genesis_hash);
|
||||||
let mut batch =db.transaction();
|
let mut batch =db.transaction();
|
||||||
bc.insert_block(&mut batch, &first, vec![]);
|
bc.insert_block(&mut batch, &first, vec![]);
|
||||||
@ -1766,7 +1774,7 @@ mod tests {
|
|||||||
|
|
||||||
{
|
{
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
assert_eq!(bc.best_block_hash(), first_hash);
|
assert_eq!(bc.best_block_hash(), first_hash);
|
||||||
}
|
}
|
||||||
@ -1821,7 +1829,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
let mut batch =db.transaction();
|
let mut batch =db.transaction();
|
||||||
bc.insert_block(&mut batch, &b1, vec![]);
|
bc.insert_block(&mut batch, &b1, vec![]);
|
||||||
db.write(batch).unwrap();
|
db.write(batch).unwrap();
|
||||||
@ -1881,7 +1889,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
insert_block(&db, &bc, &b1, vec![Receipt {
|
insert_block(&db, &bc, &b1, vec![Receipt {
|
||||||
state_root: H256::default(),
|
state_root: H256::default(),
|
||||||
gas_used: 10_000.into(),
|
gas_used: 10_000.into(),
|
||||||
@ -1985,7 +1993,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
|
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
|
||||||
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
|
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
|
||||||
@ -2042,7 +2050,7 @@ mod tests {
|
|||||||
|
|
||||||
{
|
{
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
|
let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
|
||||||
|
|
||||||
let mut batch =db.transaction();
|
let mut batch =db.transaction();
|
||||||
@ -2061,7 +2069,7 @@ mod tests {
|
|||||||
|
|
||||||
// re-loading the blockchain should load the correct best block.
|
// re-loading the blockchain should load the correct best block.
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
assert_eq!(bc.best_block_number(), 5);
|
assert_eq!(bc.best_block_number(), 5);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2078,7 +2086,7 @@ mod tests {
|
|||||||
|
|
||||||
let temp = RandomTempPath::new();
|
let temp = RandomTempPath::new();
|
||||||
let db = new_db(temp.as_str());
|
let db = new_db(temp.as_str());
|
||||||
let bc = BlockChain::new(Config::default(), &genesis, db.clone());
|
let bc = new_chain(&genesis, db.clone());
|
||||||
|
|
||||||
let mut batch =db.transaction();
|
let mut batch =db.transaction();
|
||||||
bc.insert_block(&mut batch, &first, vec![]);
|
bc.insert_block(&mut batch, &first, vec![]);
|
||||||
|
@ -52,7 +52,7 @@ use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{
    BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient,
    MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
    ChainNotify,
    ChainNotify, PruningInfo, ProvingBlockChainClient,
};
use client::Error as ClientError;
use env_info::EnvInfo;
@ -164,7 +164,7 @@ impl Client {
        let gb = spec.genesis_block();

        let db = Arc::new(try!(Database::open(&db_config, &path.to_str().expect("DB path could not be converted to string.")).map_err(ClientError::Database)));
        let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
        let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone(), spec.engine.clone()));
        let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));

        let trie_spec = match config.fat_db {
@ -787,7 +787,7 @@ impl snapshot::DatabaseRestore for Client {

        let cache_size = state_db.cache_size();
        *state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE), cache_size);
        *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
        *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone(), self.engine.clone()));
        *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
        Ok(())
    }
@ -1272,7 +1272,7 @@ impl BlockChainClient for Client {
        self.miner.pending_transactions(self.chain.read().best_block_number())
    }

    fn signing_network_id(&self) -> Option<u8> {
    fn signing_network_id(&self) -> Option<u64> {
        self.engine.signing_network_id(&self.latest_env_info())
    }

@ -1286,6 +1286,13 @@ impl BlockChainClient for Client {
        self.uncle(id)
            .map(|header| self.engine.extra_info(&decode(&header)))
    }

    fn pruning_info(&self) -> PruningInfo {
        PruningInfo {
            earliest_chain: self.chain.read().first_block_number().unwrap_or(1),
            earliest_state: self.state_db.lock().journal_db().earliest_era().unwrap_or(0),
        }
    }
}
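A tiny illustrative helper on top of the new PruningInfo (assuming the u64 fields shown above), e.g. for deciding whether a state query can still be served:

// Illustrative only: state older than `earliest_state` has been pruned away.
fn can_serve_state_query(info: &PruningInfo, block_number: u64) -> bool {
    block_number >= info.earliest_state
}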

impl MiningBlockChainClient for Client {
@ -1370,32 +1377,60 @@ impl MayPanic for Client {
    }
}

#[test]
fn should_not_cache_details_before_commit() {
    use tests::helpers::*;
    use std::thread;
    use std::time::Duration;
    use std::sync::atomic::{AtomicBool, Ordering};

    let client = generate_dummy_client(0);
    let genesis = client.chain_info().best_block_hash;
    let (new_hash, new_block) = get_good_dummy_block_hash();

    let go = {
        // Separate thread uncommitted transaction
        let go = Arc::new(AtomicBool::new(false));
        let go_thread = go.clone();
        let another_client = client.reference().clone();
        thread::spawn(move || {
            let mut batch = DBTransaction::new(&*another_client.chain.read().db().clone());
            another_client.chain.read().insert_block(&mut batch, &new_block, Vec::new());
            go_thread.store(true, Ordering::SeqCst);
        });
        go
    };

    while !go.load(Ordering::SeqCst) { thread::park_timeout(Duration::from_millis(5)); }

    assert!(client.tree_route(&genesis, &new_hash).is_none());
}

impl ProvingBlockChainClient for Client {
    fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockID) -> Vec<Bytes> {
        self.state_at(id)
            .and_then(move |state| state.prove_storage(key1, key2, from_level).ok())
            .unwrap_or_else(Vec::new)
    }

    fn prove_account(&self, key1: H256, from_level: u32, id: BlockID) -> Vec<Bytes> {
        self.state_at(id)
            .and_then(move |state| state.prove_account(key1, from_level).ok())
            .unwrap_or_else(Vec::new)
    }

    fn code_by_hash(&self, account_key: H256, id: BlockID) -> Bytes {
        self.state_at(id)
            .and_then(move |state| state.code_by_address_hash(account_key).ok())
            .and_then(|x| x)
            .unwrap_or_else(Vec::new)
    }
}

#[cfg(test)]
mod tests {

    #[test]
    fn should_not_cache_details_before_commit() {
        use client::BlockChainClient;
        use tests::helpers::*;

        use std::thread;
        use std::time::Duration;
        use std::sync::Arc;
        use std::sync::atomic::{AtomicBool, Ordering};
        use util::kvdb::DBTransaction;

        let client = generate_dummy_client(0);
        let genesis = client.chain_info().best_block_hash;
        let (new_hash, new_block) = get_good_dummy_block_hash();

        let go = {
            // Separate thread uncommitted transaction
            let go = Arc::new(AtomicBool::new(false));
            let go_thread = go.clone();
            let another_client = client.reference().clone();
            thread::spawn(move || {
                let mut batch = DBTransaction::new(&*another_client.chain.read().db().clone());
                another_client.chain.read().insert_block(&mut batch, &new_block, Vec::new());
                go_thread.store(true, Ordering::SeqCst);
            });
            go
        };

        while !go.load(Ordering::SeqCst) { thread::park_timeout(Duration::from_millis(5)); }

        assert!(client.tree_route(&genesis, &new_hash).is_none());
    }
}
@ -25,18 +25,21 @@ mod client;
pub use self::client::*;
pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockChainConfig, VMType};
pub use self::error::Error;
pub use types::ids::*;
pub use self::test_client::{TestBlockChainClient, EachBlockWith};
pub use self::chain_notify::ChainNotify;
pub use self::traits::{BlockChainClient, MiningBlockChainClient, ProvingBlockChainClient};

pub use types::ids::*;
pub use types::trace_filter::Filter as TraceFilter;
pub use types::pruning_info::PruningInfo;
pub use types::call_analytics::CallAnalytics;

pub use executive::{Executed, Executive, TransactOptions};
pub use env_info::{LastHashes, EnvInfo};
pub use self::chain_notify::ChainNotify;

pub use types::call_analytics::CallAnalytics;
pub use block_import_error::BlockImportError;
pub use transaction_import::TransactionImportResult;
pub use transaction_import::TransactionImportError;
pub use self::traits::{BlockChainClient, MiningBlockChainClient};
pub use verification::VerifierType;

/// IPC interfaces
@ -38,6 +38,7 @@ use evm::{Factory as EvmFactory, VMType, Schedule};
use miner::{Miner, MinerService, TransactionImportResult};
use spec::Spec;
use types::mode::Mode;
use types::pruning_info::PruningInfo;
use views::BlockView;

use verification::queue::QueueInfo;
@ -662,9 +663,16 @@ impl BlockChainClient for TestBlockChainClient {
        self.miner.pending_transactions(self.chain_info().best_block_number)
    }

    fn signing_network_id(&self) -> Option<u8> { None }
    fn signing_network_id(&self) -> Option<u64> { None }

    fn mode(&self) -> Mode { Mode::Active }

    fn set_mode(&self, _: Mode) { unimplemented!(); }

    fn pruning_info(&self) -> PruningInfo {
        PruningInfo {
            earliest_chain: 1,
            earliest_state: 1,
        }
    }
}
@ -39,6 +39,7 @@ use types::call_analytics::CallAnalytics;
use types::blockchain_info::BlockChainInfo;
use types::block_status::BlockStatus;
use types::mode::Mode;
use types::pruning_info::PruningInfo;

#[ipc(client_ident="RemoteClient")]
/// Blockchain database client. Owns and manages a blockchain and a block queue.
@ -237,7 +238,7 @@ pub trait BlockChainClient : Sync + Send {
    }

    /// Get the preferred network ID to sign on
    fn signing_network_id(&self) -> Option<u8>;
    fn signing_network_id(&self) -> Option<u64>;

    /// Get the mode.
    fn mode(&self) -> Mode;
@ -250,10 +251,15 @@ pub trait BlockChainClient : Sync + Send {

    /// Returns engine-related extra info for `UncleID`.
    fn uncle_extra_info(&self, id: UncleID) -> Option<BTreeMap<String, String>>;

    /// Returns information about pruning/data availability.
    fn pruning_info(&self) -> PruningInfo;
}

impl IpcConfig for BlockChainClient { }

/// Extended client interface used for mining
pub trait MiningBlockChainClient : BlockChainClient {
pub trait MiningBlockChainClient: BlockChainClient {
    /// Returns OpenBlock prepared for closing.
    fn prepare_open_block(&self,
        author: Address,
@ -271,4 +277,23 @@ pub trait MiningBlockChainClient : BlockChainClient {
    fn latest_schedule(&self) -> Schedule;
}

impl IpcConfig for BlockChainClient { }
/// Extended client interface for providing proofs of the state.
pub trait ProvingBlockChainClient: BlockChainClient {
    /// Prove account storage at a specific block id.
    ///
    /// Both provided keys assume a secure trie.
    /// Returns a vector of raw trie nodes (in order from the root) proving the storage query.
    /// Nodes after `from_level` may be omitted.
    /// An empty vector indicates unservable query.
    fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockID) -> Vec<Bytes>;

    /// Prove account existence at a specific block id.
    /// The key is the keccak hash of the account's address.
    /// Returns a vector of raw trie nodes (in order from the root) proving the query.
    /// Nodes after `from_level` may be omitted.
    /// An empty vector indicates unservable query.
    fn prove_account(&self, key1: H256, from_level: u32, id: BlockID) -> Vec<Bytes>;

    /// Get code by address hash.
    fn code_by_hash(&self, account_key: H256, id: BlockID) -> Bytes;
}
@@ -21,13 +21,15 @@ use std::sync::Weak;
 use std::time::{UNIX_EPOCH, Duration};
 use util::*;
 use ethkey::{verify_address, Signature};
-use rlp::{UntrustedRlp, View, encode};
+use rlp::{UntrustedRlp, Rlp, View, encode};
 use account_provider::AccountProvider;
 use block::*;
 use spec::CommonParams;
 use engines::Engine;
 use header::Header;
 use error::{Error, BlockError};
+use blockchain::extras::BlockDetails;
+use views::HeaderView;
 use evm::Schedule;
 use ethjson;
 use io::{IoContext, IoHandler, TimerToken, IoService, IoChannel};
@@ -66,18 +68,20 @@ pub struct AuthorityRound {
 	params: CommonParams,
 	our_params: AuthorityRoundParams,
 	builtins: BTreeMap<Address, Builtin>,
-	transition_service: IoService<BlockArrived>,
+	transition_service: IoService<()>,
 	message_channel: Mutex<Option<IoChannel<ClientIoMessage>>>,
 	step: AtomicUsize,
 	proposed: AtomicBool,
+	account_provider: Mutex<Option<Arc<AccountProvider>>>,
+	password: RwLock<Option<String>>,
 }

 fn header_step(header: &Header) -> Result<usize, ::rlp::DecoderError> {
-	UntrustedRlp::new(&header.seal()[0]).as_val()
+	UntrustedRlp::new(&header.seal().get(0).expect("was either checked with verify_block_basic or is genesis; has 2 fields; qed (Make sure the spec file has a correct genesis seal)")).as_val()
 }

 fn header_signature(header: &Header) -> Result<Signature, ::rlp::DecoderError> {
-	UntrustedRlp::new(&header.seal()[1]).as_val::<H520>().map(Into::into)
+	UntrustedRlp::new(&header.seal().get(1).expect("was checked with verify_block_basic; has 2 fields; qed")).as_val::<H520>().map(Into::into)
 }

 trait AsMillis {
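The indexing change above trades an anonymous out-of-bounds panic for an `expect` that states the invariant being relied on. A minimal, std-only sketch of the same pattern (a `Vec<Vec<u8>>` stands in for the header's seal fields; the message text is illustrative):

```rust
// Sketch of the defensive seal access introduced above, with plain std types.
fn step_field(seal: &[Vec<u8>]) -> &Vec<u8> {
    // Panic with the documented invariant instead of an anonymous index panic,
    // mirroring the `expect("...; qed")` style used in the hunk.
    seal.get(0)
        .expect("seal was checked in verify_block_basic; has 2 fields; qed")
}

fn main() {
    let seal = vec![vec![0x01], vec![0x02]];
    assert_eq!(step_field(&seal), &vec![0x01u8]);
}
```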
@@ -99,10 +103,12 @@ impl AuthorityRound {
 			params: params,
 			our_params: our_params,
 			builtins: builtins,
-			transition_service: try!(IoService::<BlockArrived>::start()),
+			transition_service: try!(IoService::<()>::start()),
 			message_channel: Mutex::new(None),
 			step: AtomicUsize::new(initial_step),
-			proposed: AtomicBool::new(false)
+			proposed: AtomicBool::new(false),
+			account_provider: Mutex::new(None),
+			password: RwLock::new(None),
 		});
 		let handler = TransitionHandler { engine: Arc::downgrade(&engine) };
 		try!(engine.transition_service.register_handler(Arc::new(handler)));
@@ -141,20 +147,17 @@ struct TransitionHandler {
 	engine: Weak<AuthorityRound>,
 }

-#[derive(Clone)]
-struct BlockArrived;
-
 const ENGINE_TIMEOUT_TOKEN: TimerToken = 23;

-impl IoHandler<BlockArrived> for TransitionHandler {
-	fn initialize(&self, io: &IoContext<BlockArrived>) {
+impl IoHandler<()> for TransitionHandler {
+	fn initialize(&self, io: &IoContext<()>) {
 		if let Some(engine) = self.engine.upgrade() {
 			io.register_timer_once(ENGINE_TIMEOUT_TOKEN, engine.remaining_step_duration().as_millis())
 				.unwrap_or_else(|e| warn!(target: "poa", "Failed to start consensus step timer: {}.", e))
 		}
 	}

-	fn timeout(&self, io: &IoContext<BlockArrived>, timer: TimerToken) {
+	fn timeout(&self, io: &IoContext<()>, timer: TimerToken) {
 		if timer == ENGINE_TIMEOUT_TOKEN {
 			if let Some(engine) = self.engine.upgrade() {
 				engine.step.fetch_add(1, AtomicOrdering::SeqCst);
@@ -206,10 +209,6 @@ impl Engine for AuthorityRound {
 		});
 	}

-	/// Apply the block reward on finalisation of the block.
-	/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
-	fn on_close_block(&self, _block: &mut ExecutedBlock) {}
-
 	fn is_sealer(&self, author: &Address) -> Option<bool> {
 		let p = &self.our_params;
 		Some(p.authorities.contains(author))
@@ -219,14 +218,14 @@ impl Engine for AuthorityRound {
 	///
 	/// This operation is synchronous and may (quite reasonably) not be available, in which `false` will
 	/// be returned.
-	fn generate_seal(&self, block: &ExecutedBlock, accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
+	fn generate_seal(&self, block: &ExecutedBlock) -> Option<Vec<Bytes>> {
 		if self.proposed.load(AtomicOrdering::SeqCst) { return None; }
 		let header = block.header();
 		let step = self.step();
 		if self.is_step_proposer(step, header.author()) {
-			if let Some(ap) = accounts {
+			if let Some(ref ap) = *self.account_provider.lock() {
 				// Account should be permanently unlocked, otherwise sealing will fail.
-				if let Ok(signature) = ap.sign(*header.author(), None, header.bare_hash()) {
+				if let Ok(signature) = ap.sign(*header.author(), self.password.read().clone(), header.bare_hash()) {
 					trace!(target: "poa", "generate_seal: Issuing a block for step {}.", step);
 					self.proposed.store(true, AtomicOrdering::SeqCst);
 					return Some(vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
@@ -272,7 +271,6 @@ impl Engine for AuthorityRound {
 	}

 	fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
-		// Don't calculate difficulty for genesis blocks.
 		if header.number() == 0 {
 			return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
 		}
@@ -284,10 +282,6 @@ impl Engine for AuthorityRound {
 			try!(Err(BlockError::DoubleVote(header.author().clone())));
 		}

-		// Check difficulty is correct given the two timestamps.
-		if header.difficulty() != parent.difficulty() {
-			return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: *parent.difficulty(), found: *header.difficulty() })))
-		}
 		let gas_limit_divisor = self.our_params.gas_limit_bound_divisor;
 		let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
 		let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
@@ -306,9 +300,29 @@ impl Engine for AuthorityRound {
 		t.sender().map(|_|()) // Perform EC recovery and cache sender
 	}

+	fn is_new_best_block(&self, _best_total_difficulty: U256, best_header: HeaderView, _parent_details: &BlockDetails, new_header: &HeaderView) -> bool {
+		let new_number = new_header.number();
+		let best_number = best_header.number();
+		if new_number != best_number {
+			new_number > best_number
+		} else {
+			// Take the oldest step at given height.
+			let new_step: usize = Rlp::new(&new_header.seal()[0]).as_val();
+			let best_step: usize = Rlp::new(&best_header.seal()[0]).as_val();
+			new_step < best_step
+		}
+	}
+
 	fn register_message_channel(&self, message_channel: IoChannel<ClientIoMessage>) {
-		let mut guard = self.message_channel.lock();
-		*guard = Some(message_channel);
+		*self.message_channel.lock() = Some(message_channel);
+	}
+
+	fn set_signer(&self, _address: Address, password: String) {
+		*self.password.write() = Some(password);
+	}
+
+	fn register_account_provider(&self, account_provider: Arc<AccountProvider>) {
+		*self.account_provider.lock() = Some(account_provider);
 	}
 }
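The fork-choice method added above prefers height first and, within the same height, the seal produced at the earliest step. A standalone sketch of that rule with plain integers in place of header views and RLP-decoded seal fields (illustrative only):

```rust
// Standalone sketch of the AuthorityRound fork-choice rule added above:
// a block at a greater height always wins; at equal height the block sealed
// at the older (smaller) step is preferred.
fn is_new_best(best_number: u64, best_step: usize, new_number: u64, new_step: usize) -> bool {
    if new_number != best_number {
        new_number > best_number
    } else {
        // Take the oldest step at a given height.
        new_step < best_step
    }
}

fn main() {
    assert!(is_new_best(10, 3, 11, 9));  // higher block wins regardless of step
    assert!(is_new_best(10, 5, 10, 4));  // same height: older step wins
    assert!(!is_new_best(10, 4, 10, 5)); // newer step at same height loses
}
```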
@@ -377,12 +391,11 @@ mod tests {
 	fn generates_seal_and_does_not_double_propose() {
 		let tap = AccountProvider::transient_provider();
 		let addr1 = tap.insert_account("1".sha3(), "1").unwrap();
-		tap.unlock_account_permanently(addr1, "1".into()).unwrap();
 		let addr2 = tap.insert_account("2".sha3(), "2").unwrap();
-		tap.unlock_account_permanently(addr2, "2".into()).unwrap();

 		let spec = Spec::new_test_round();
 		let engine = &*spec.engine;
+		engine.register_account_provider(Arc::new(tap));
 		let genesis_header = spec.genesis_header();
 		let mut db1 = get_temp_state_db().take();
 		spec.ensure_db_good(&mut db1, &TrieFactory::new(TrieSpec::Secure)).unwrap();
@@ -394,16 +407,18 @@ mod tests {
 		let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let b2 = b2.close_and_lock();

-		if let Some(seal) = engine.generate_seal(b1.block(), Some(&tap)) {
+		engine.set_signer(addr1, "1".into());
+		if let Some(seal) = engine.generate_seal(b1.block()) {
 			assert!(b1.clone().try_seal(engine, seal).is_ok());
 			// Second proposal is forbidden.
-			assert!(engine.generate_seal(b1.block(), Some(&tap)).is_none());
+			assert!(engine.generate_seal(b1.block()).is_none());
 		}

-		if let Some(seal) = engine.generate_seal(b2.block(), Some(&tap)) {
+		engine.set_signer(addr2, "2".into());
+		if let Some(seal) = engine.generate_seal(b2.block()) {
 			assert!(b2.clone().try_seal(engine, seal).is_ok());
 			// Second proposal is forbidden.
-			assert!(engine.generate_seal(b2.block(), Some(&tap)).is_none());
+			assert!(engine.generate_seal(b2.block()).is_none());
 		}
 	}
@@ -58,6 +58,8 @@ pub struct BasicAuthority {
 	params: CommonParams,
 	our_params: BasicAuthorityParams,
 	builtins: BTreeMap<Address, Builtin>,
+	account_provider: Mutex<Option<Arc<AccountProvider>>>,
+	password: RwLock<Option<String>>,
 }

 impl BasicAuthority {
@@ -67,6 +69,8 @@ impl BasicAuthority {
 			params: params,
 			our_params: our_params,
 			builtins: builtins,
+			account_provider: Mutex::new(None),
+			password: RwLock::new(None),
 		}
 	}
 }
@@ -98,13 +102,8 @@ impl Engine for BasicAuthority {
 				max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
 			}
 		});
-		// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
 	}

-	/// Apply the block reward on finalisation of the block.
-	/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
-	fn on_close_block(&self, _block: &mut ExecutedBlock) {}
-
 	fn is_sealer(&self, author: &Address) -> Option<bool> {
 		Some(self.our_params.authorities.contains(author))
 	}
@@ -113,12 +112,12 @@ impl Engine for BasicAuthority {
 	///
 	/// This operation is synchronous and may (quite reasonably) not be available, in which `false` will
 	/// be returned.
-	fn generate_seal(&self, block: &ExecutedBlock, accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
-		if let Some(ap) = accounts {
+	fn generate_seal(&self, block: &ExecutedBlock) -> Option<Vec<Bytes>> {
+		if let Some(ref ap) = *self.account_provider.lock() {
 			let header = block.header();
 			let message = header.bare_hash();
 			// account should be pernamently unlocked, otherwise sealing will fail
-			if let Ok(signature) = ap.sign(*block.header().author(), None, message) {
+			if let Ok(signature) = ap.sign(*block.header().author(), self.password.read().clone(), message) {
 				return Some(vec![::rlp::encode(&(&*signature as &[u8])).to_vec()]);
 			} else {
 				trace!(target: "basicauthority", "generate_seal: FAIL: accounts secret key unavailable");
@@ -179,6 +178,14 @@ impl Engine for BasicAuthority {
 	fn verify_transaction(&self, t: &SignedTransaction, _header: &Header) -> Result<(), Error> {
 		t.sender().map(|_|()) // Perform EC recovery and cache sender
 	}

+	fn set_signer(&self, _address: Address, password: String) {
+		*self.password.write() = Some(password);
+	}
+
+	fn register_account_provider(&self, ap: Arc<AccountProvider>) {
+		*self.account_provider.lock() = Some(ap);
+	}
 }

 #[cfg(test)]
@@ -250,10 +257,11 @@ mod tests {
 	fn can_generate_seal() {
 		let tap = AccountProvider::transient_provider();
 		let addr = tap.insert_account("".sha3(), "").unwrap();
-		tap.unlock_account_permanently(addr, "".into()).unwrap();

 		let spec = new_test_authority();
 		let engine = &*spec.engine;
+		engine.set_signer(addr, "".into());
+		engine.register_account_provider(Arc::new(tap));
 		let genesis_header = spec.genesis_header();
 		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
@@ -261,7 +269,7 @@ mod tests {
 		let last_hashes = Arc::new(vec![genesis_header.hash()]);
 		let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let b = b.close_and_lock();
-		let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
+		let seal = engine.generate_seal(b.block()).unwrap();
 		assert!(b.try_seal(engine, seal).is_ok());
 	}
@@ -23,7 +23,6 @@ use spec::CommonParams;
 use evm::Schedule;
 use block::ExecutedBlock;
 use util::Bytes;
-use account_provider::AccountProvider;

 /// An engine which does not provide any consensus mechanism, just seals blocks internally.
 pub struct InstantSeal {
@@ -60,7 +59,7 @@ impl Engine for InstantSeal {

 	fn is_sealer(&self, _author: &Address) -> Option<bool> { Some(true) }

-	fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
+	fn generate_seal(&self, _block: &ExecutedBlock) -> Option<Vec<Bytes>> {
 		Some(Vec::new())
 	}
 }
@@ -70,16 +69,12 @@ mod tests {
 	use util::*;
 	use util::trie::TrieSpec;
 	use tests::helpers::*;
-	use account_provider::AccountProvider;
 	use spec::Spec;
 	use header::Header;
 	use block::*;

 	#[test]
 	fn instant_can_seal() {
-		let tap = AccountProvider::transient_provider();
-		let addr = tap.insert_account("".sha3(), "").unwrap();
-
 		let spec = Spec::new_instant();
 		let engine = &*spec.engine;
 		let genesis_header = spec.genesis_header();
@@ -87,10 +82,9 @@ mod tests {
 		let mut db = db_result.take();
 		spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap();
 		let last_hashes = Arc::new(vec![genesis_header.hash()]);
-		let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
+		let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let b = b.close_and_lock();
-		// Seal with empty AccountProvider.
-		let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
+		let seal = engine.generate_seal(b.block()).unwrap();
 		assert!(b.try_seal(engine, seal).is_ok());
 	}
@@ -38,6 +38,9 @@ use io::IoChannel;
 use service::ClientIoMessage;
 use header::Header;
 use transaction::SignedTransaction;
+use ethereum::ethash;
+use blockchain::extras::BlockDetails;
+use views::HeaderView;

 /// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based.
 /// Provides hooks into each of the major parts of block import.
@@ -91,7 +94,7 @@ pub trait Engine : Sync + Send {
 	///
 	/// This operation is synchronous and may (quite reasonably) not be available, in which None will
 	/// be returned.
-	fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> { None }
+	fn generate_seal(&self, _block: &ExecutedBlock) -> Option<Vec<Bytes>> { None }

 	/// Phase 1 quick block verification. Only does checks that are cheap. `block` (the header's full block)
 	/// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import.
@@ -113,7 +116,7 @@ pub trait Engine : Sync + Send {
 	fn verify_transaction(&self, _t: &SignedTransaction, _header: &Header) -> Result<(), Error> { Ok(()) }

 	/// The network ID that transactions should be signed with.
-	fn signing_network_id(&self, _env_info: &EnvInfo) -> Option<u8> { None }
+	fn signing_network_id(&self, _env_info: &EnvInfo) -> Option<u64> { None }

 	/// Verify the seal of a block. This is an auxilliary method that actually just calls other `verify_` methods
 	/// to get the job done. By default it must pass `verify_basic` and `verify_block_unordered`. If more or fewer
@@ -144,7 +147,17 @@ pub trait Engine : Sync + Send {
 		self.builtins().get(a).expect("attempted to execute nonexistent builtin").execute(input, output);
 	}

+	/// Check if new block should be chosen as the one in chain.
+	fn is_new_best_block(&self, best_total_difficulty: U256, _best_header: HeaderView, parent_details: &BlockDetails, new_header: &HeaderView) -> bool {
+		ethash::is_new_best_block(best_total_difficulty, parent_details, new_header)
+	}
+
+	/// Register an account which signs consensus messages.
+	fn set_signer(&self, _address: Address, _password: String) {}
+
 	/// Add a channel for communication with Client which can be used for sealing.
 	fn register_message_channel(&self, _message_channel: IoChannel<ClientIoMessage>) {}
-	// TODO: sealing stuff - though might want to leave this for later.
+
+	/// Add an account provider useful for Engines that sign stuff.
+	fn register_account_provider(&self, _account_provider: Arc<AccountProvider>) {}
 }
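The new default `set_signer` and `register_account_provider` hooks above move signer state onto the engine itself instead of threading an `AccountProvider` through `generate_seal`. A minimal std-only sketch of that state shape, using stand-in types rather than the crate's real `AccountProvider`:

```rust
use std::sync::{Arc, Mutex, RwLock};

// Stand-in for the real account provider; only its presence matters here.
struct AccountProvider;

#[derive(Default)]
struct SigningState {
    account_provider: Mutex<Option<Arc<AccountProvider>>>,
    password: RwLock<Option<String>>,
}

impl SigningState {
    fn register_account_provider(&self, ap: Arc<AccountProvider>) {
        *self.account_provider.lock().unwrap() = Some(ap);
    }
    fn set_signer(&self, password: String) {
        *self.password.write().unwrap() = Some(password);
    }
    // At sealing time the engine reads both back, as `generate_seal` does above.
    fn can_sign(&self) -> bool {
        self.account_provider.lock().unwrap().is_some()
            && self.password.read().unwrap().is_some()
    }
}

fn main() {
    let state = SigningState::default();
    assert!(!state.can_sign());
    state.register_account_provider(Arc::new(AccountProvider));
    state.set_signer("secret".to_string());
    assert!(state.can_sign());
}
```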
@@ -38,6 +38,12 @@ impl NullEngine {
 	}
 }

+impl Default for NullEngine {
+	fn default() -> Self {
+		Self::new(Default::default(), Default::default())
+	}
+}
+
 impl Engine for NullEngine {
 	fn name(&self) -> &str {
 		"NullEngine"
@@ -21,6 +21,7 @@ use builtin::Builtin;
 use env_info::EnvInfo;
 use error::{BlockError, TransactionError, Error};
 use header::Header;
+use views::HeaderView;
 use state::CleanupMode;
 use spec::CommonParams;
 use transaction::SignedTransaction;
@@ -28,6 +29,7 @@ use engines::Engine;
 use evm::Schedule;
 use ethjson;
 use rlp::{self, UntrustedRlp, View};
+use blockchain::extras::BlockDetails;

 /// Ethash params.
 #[derive(Debug, PartialEq)]
@@ -163,9 +165,9 @@ impl Engine for Ethash {
 		}
 	}

-	fn signing_network_id(&self, env_info: &EnvInfo) -> Option<u8> {
-		if env_info.number >= self.ethash_params.eip155_transition && self.params().network_id < 127 {
-			Some(self.params().network_id as u8)
+	fn signing_network_id(&self, env_info: &EnvInfo) -> Option<u64> {
+		if env_info.number >= self.ethash_params.eip155_transition {
+			Some(self.params().network_id)
 		} else {
 			None
 		}
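With the return type widened to `Option<u64>`, the only remaining condition in `signing_network_id` is the EIP-155 transition block; the old `network_id < 127` guard existed only because the id had to fit in a `u8`. A standalone sketch of the resulting rule (plain integers stand in for `EnvInfo` and the engine parameters; the numbers are just example values):

```rust
// Sketch of the widened signing_network_id logic above.
fn signing_network_id(block_number: u64, eip155_transition: u64, network_id: u64) -> Option<u64> {
    if block_number >= eip155_transition {
        Some(network_id)
    } else {
        None
    }
}

fn main() {
    // Before the transition block, transactions stay network-agnostic.
    assert_eq!(signing_network_id(100, 2_675_000, 1), None);
    // Afterwards the full 64-bit network id is used, with no `< 127` restriction.
    assert_eq!(signing_network_id(3_000_000, 2_675_000, 61), Some(61));
}
```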
@@ -314,7 +316,7 @@ impl Engine for Ethash {
 		}

 		if let Some(n) = t.network_id() {
-			if header.number() < self.ethash_params.eip155_transition || n as usize != self.params().network_id {
+			if header.number() < self.ethash_params.eip155_transition || n != self.params().network_id {
 				return Err(TransactionError::InvalidNetworkId.into())
 			}
 		}
@@ -325,6 +327,15 @@ impl Engine for Ethash {
 	fn verify_transaction(&self, t: &SignedTransaction, _header: &Header) -> Result<(), Error> {
 		t.sender().map(|_|()) // Perform EC recovery and cache sender
 	}
+
+	fn is_new_best_block(&self, best_total_difficulty: U256, _best_header: HeaderView, parent_details: &BlockDetails, new_header: &HeaderView) -> bool {
+		is_new_best_block(best_total_difficulty, parent_details, new_header)
+	}
 }

+/// Check if a new block should replace the best blockchain block.
+pub fn is_new_best_block(best_total_difficulty: U256, parent_details: &BlockDetails, new_header: &HeaderView) -> bool {
+	parent_details.total_difficulty + new_header.difficulty() > best_total_difficulty
+}
+
 #[cfg_attr(feature="dev", allow(wrong_self_convention))]
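The freshly extracted `is_new_best_block` helper expresses the usual heaviest-chain rule: the candidate wins when its parent's total difficulty plus its own difficulty exceeds the current best total difficulty. A standalone sketch with `u128` standing in for `U256`:

```rust
// Standalone sketch of the extracted total-difficulty fork-choice rule.
fn is_new_best_block(best_total_difficulty: u128, parent_total_difficulty: u128, new_difficulty: u128) -> bool {
    parent_total_difficulty + new_difficulty > best_total_difficulty
}

fn main() {
    assert!(is_new_best_block(1_000, 980, 30));  // 1_010 > 1_000: reorg to the new block
    assert!(!is_new_best_block(1_000, 980, 20)); // a tie does not displace the best block
}
```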
@@ -19,7 +19,7 @@ use std::time::{Instant, Duration};

 use util::*;
 use util::using_queue::{UsingQueue, GetAction};
-use account_provider::AccountProvider;
+use account_provider::{AccountProvider, Error as AccountError};
 use views::{BlockView, HeaderView};
 use header::Header;
 use state::{State, CleanupMode};
@@ -464,15 +464,12 @@ impl Miner {
 	/// Attempts to perform internal sealing (one that does not require work) to return Ok(sealed),
 	/// Err(Some(block)) returns for unsuccesful sealing while Err(None) indicates misspecified engine.
 	fn seal_block_internally(&self, block: ClosedBlock) -> Result<SealedBlock, Option<ClosedBlock>> {
-		trace!(target: "miner", "seal_block_internally: block has transaction - attempting internal seal.");
-		let s = self.engine.generate_seal(block.block(), match self.accounts {
-			Some(ref x) => Some(&**x),
-			None => None,
-		});
+		trace!(target: "miner", "seal_block_internally: attempting internal seal.");
+		let s = self.engine.generate_seal(block.block());
 		if let Some(seal) = s {
 			trace!(target: "miner", "seal_block_internally: managed internal seal. importing...");
-			block.lock().try_seal(&*self.engine, seal).or_else(|_| {
-				warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?");
+			block.lock().try_seal(&*self.engine, seal).or_else(|(e, _)| {
+				warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal: {}", e);
 				Err(None)
 			})
 		} else {
@@ -485,7 +482,7 @@ impl Miner {
 	fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool {
 		if !block.transactions().is_empty() || self.forced_sealing() {
 			if let Ok(sealed) = self.seal_block_internally(block) {
-				if chain.import_block(sealed.rlp_bytes()).is_ok() {
+				if chain.import_sealed_block(sealed).is_ok() {
 					trace!(target: "miner", "import_block_internally: imported internally sealed block");
 					return true
 				}
@@ -740,6 +737,19 @@ impl MinerService for Miner {
 		*self.author.write() = author;
 	}

+	fn set_engine_signer(&self, address: Address, password: String) -> Result<(), AccountError> {
+		if self.seals_internally {
+			if let Some(ref ap) = self.accounts {
+				try!(ap.sign(address.clone(), Some(password.clone()), Default::default()));
+			}
+			let mut sealing_work = self.sealing_work.lock();
+			sealing_work.enabled = self.engine.is_sealer(&address).unwrap_or(false);
+			*self.author.write() = address;
+			self.engine.set_signer(address, password);
+		}
+		Ok(())
+	}
+
 	fn set_extra_data(&self, extra_data: Bytes) {
 		*self.extra_data.write() = extra_data;
 	}
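A rough, self-contained sketch of the `set_engine_signer` flow added above: the password is checked up front, sealing is enabled only if the engine reports the address as a sealer, and the signer is then handed to the engine. All types here are stand-ins rather than the crate's real `Engine`, `Address`, or account provider, and the password check is simplified (the real code proves the password by signing a dummy message):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct Address(u8);

struct Engine {
    authorities: Vec<Address>,
}

impl Engine {
    fn is_sealer(&self, author: &Address) -> Option<bool> {
        Some(self.authorities.contains(author))
    }
    fn set_signer(&self, _address: Address, _password: String) {}
}

fn set_engine_signer(engine: &Engine, address: Address, password: String) -> Result<bool, String> {
    // Simplified stand-in for validating the password via the account provider.
    if password.is_empty() {
        return Err("invalid password".to_string());
    }
    // Enable sealing only if the engine recognises the address as a sealer.
    let enabled = engine.is_sealer(&address).unwrap_or(false);
    engine.set_signer(address, password);
    Ok(enabled)
}

fn main() {
    let engine = Engine { authorities: vec![Address(1)] };
    assert_eq!(set_engine_signer(&engine, Address(1), "pw".into()), Ok(true));
    assert_eq!(set_engine_signer(&engine, Address(2), "pw".into()), Ok(false));
}
```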
@@ -1042,7 +1052,7 @@ impl MinerService for Miner {
 		ret.map(f)
 	}

-	fn submit_seal(&self, chain: &MiningBlockChainClient, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
+	fn submit_seal(&self, chain: &MiningBlockChainClient, block_hash: H256, seal: Vec<Bytes>) -> Result<(), Error> {
 		let result =
 			if let Some(b) = self.sealing_work.lock().queue.get_used_if(
 				if self.options.enable_resubmission {
@@ -1050,22 +1060,22 @@ impl MinerService for Miner {
 				} else {
 					GetAction::Take
 				},
-				|b| &b.hash() == &pow_hash
+				|b| &b.hash() == &block_hash
 			) {
-				trace!(target: "miner", "Sealing block {}={}={} with seal {:?}", pow_hash, b.hash(), b.header().bare_hash(), seal);
+				trace!(target: "miner", "Submitted block {}={}={} with seal {:?}", block_hash, b.hash(), b.header().bare_hash(), seal);
 				b.lock().try_seal(&*self.engine, seal).or_else(|(e, _)| {
 					warn!(target: "miner", "Mined solution rejected: {}", e);
 					Err(Error::PowInvalid)
 				})
 			} else {
-				warn!(target: "miner", "Mined solution rejected: Block unknown or out of date.");
+				warn!(target: "miner", "Submitted solution rejected: Block unknown or out of date.");
 				Err(Error::PowHashInvalid)
 			};
 		result.and_then(|sealed| {
 			let n = sealed.header().number();
 			let h = sealed.header().hash();
 			try!(chain.import_sealed_block(sealed));
-			info!(target: "miner", "Mined block imported OK. #{}: {}", Colour::White.bold().paint(format!("{}", n)), Colour::White.bold().paint(h.hex()));
+			info!(target: "miner", "Submitted block imported OK. #{}: {}", Colour::White.bold().paint(format!("{}", n)), Colour::White.bold().paint(h.hex()));
 			Ok(())
 		})
 	}
@@ -76,6 +76,9 @@ pub trait MinerService : Send + Sync {
 	/// Set the author that we will seal blocks as.
 	fn set_author(&self, author: Address);

+	/// Set info necessary to sign consensus messages.
+	fn set_engine_signer(&self, address: Address, password: String) -> Result<(), ::account_provider::Error>;
+
 	/// Get the extra_data that we will seal blocks with.
 	fn extra_data(&self) -> Bytes;
@@ -81,6 +81,7 @@ struct Restoration {
 struct RestorationParams<'a> {
 	manifest: ManifestData, // manifest to base restoration on.
 	pruning: Algorithm, // pruning algorithm for the database.
+	engine: Arc<Engine>, // consensus engine of the chain.
 	db_path: PathBuf, // database path
 	db_config: &'a DatabaseConfig, // configuration for the database.
 	writer: Option<LooseWriter>, // writer for recovered snapshot.
@@ -99,7 +100,7 @@ impl Restoration {
 		let raw_db = Arc::new(try!(Database::open(params.db_config, &*params.db_path.to_string_lossy())
 			.map_err(UtilError::SimpleString)));

-		let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone());
+		let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone(), params.engine);
 		let blocks = try!(BlockRebuilder::new(chain, raw_db.clone(), &manifest));

 		let root = manifest.state_root.clone();
@@ -420,6 +421,7 @@ impl Service {
 		let params = RestorationParams {
 			manifest: manifest,
 			pruning: self.pruning,
+			engine: self.engine.clone(),
 			db_path: self.restoration_db(),
 			db_config: &self.db_config,
 			writer: writer,
@@ -37,13 +37,14 @@ fn chunk_and_restore(amount: u64) {
 	let genesis = canon_chain.generate(&mut finalizer).unwrap();
 	let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);

+	let engine = Arc::new(::engines::NullEngine::default());
 	let orig_path = RandomTempPath::create_dir();
 	let new_path = RandomTempPath::create_dir();
 	let mut snapshot_path = new_path.as_path().to_owned();
 	snapshot_path.push("SNAP");

 	let old_db = Arc::new(Database::open(&db_cfg, orig_path.as_str()).unwrap());
-	let bc = BlockChain::new(Default::default(), &genesis, old_db.clone());
+	let bc = BlockChain::new(Default::default(), &genesis, old_db.clone(), engine.clone());

 	// build the blockchain.
 	let mut batch = old_db.transaction();
@@ -73,21 +74,20 @@ fn chunk_and_restore(amount: u64) {

 	// restore it.
 	let new_db = Arc::new(Database::open(&db_cfg, new_path.as_str()).unwrap());
-	let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone());
+	let new_chain = BlockChain::new(Default::default(), &genesis, new_db.clone(), engine.clone());
 	let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap();
 	let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
-	let engine = ::engines::NullEngine::new(Default::default(), Default::default());
 	let flag = AtomicBool::new(true);
 	for chunk_hash in &reader.manifest().block_hashes {
 		let compressed = reader.chunk(*chunk_hash).unwrap();
 		let chunk = snappy::decompress(&compressed).unwrap();
-		rebuilder.feed(&chunk, &engine, &flag).unwrap();
+		rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap();
 	}

 	rebuilder.finalize(HashMap::new()).unwrap();

 	// and test it.
-	let new_chain = BlockChain::new(Default::default(), &genesis, new_db);
+	let new_chain = BlockChain::new(Default::default(), &genesis, new_db, engine);
 	assert_eq!(new_chain.best_block_hash(), best_hash);
 }
@@ -121,8 +121,8 @@ fn checks_flag() {

 	let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
 	let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap());
-	let chain = BlockChain::new(Default::default(), &genesis, db.clone());
-	let engine = ::engines::NullEngine::new(Default::default(), Default::default());
+	let engine = Arc::new(::engines::NullEngine::default());
+	let chain = BlockChain::new(Default::default(), &genesis, db.clone(), engine.clone());

 	let manifest = ::snapshot::ManifestData {
 		state_hashes: Vec::new(),
@@ -134,7 +134,7 @@ fn checks_flag() {

 	let mut rebuilder = BlockRebuilder::new(chain, db.clone(), &manifest).unwrap();

-	match rebuilder.feed(&chunk, &engine, &AtomicBool::new(false)) {
+	match rebuilder.feed(&chunk, engine.as_ref(), &AtomicBool::new(false)) {
 		Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}
 		_ => panic!("Wrong result on abort flag set")
 	}
@@ -30,11 +30,9 @@ pub struct Ethereum {

 impl Into<Generic> for Ethereum {
 	fn into(self) -> Generic {
-		let mut s = RlpStream::new();
-		s.append(&self.mix_hash);
-		s.append(&self.nonce);
+		let mut s = RlpStream::new_list(2);
+		s.append(&self.mix_hash).append(&self.nonce);
 		Generic {
-			fields: 2,
 			rlp: s.out()
 		}
 	}
@@ -42,8 +40,6 @@ impl Into<Generic> for Ethereum {

 /// Generic seal.
 pub struct Generic {
-	/// Number of seal fields.
-	pub fields: usize,
 	/// Seal rlp.
 	pub rlp: Vec<u8>,
 }
@@ -64,7 +60,6 @@ impl From<ethjson::spec::Seal> for Seal {
 				mix_hash: eth.mix_hash.into()
 			}),
 			ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic {
-				fields: g.fields,
 				rlp: g.rlp.into()
 			})
 		}
@@ -30,15 +30,14 @@ use ethjson;
 use rlp::{Rlp, RlpStream, View, Stream};

 /// Parameters common to all engines.
-#[derive(Debug, PartialEq, Clone)]
-#[cfg_attr(test, derive(Default))]
+#[derive(Debug, PartialEq, Clone, Default)]
 pub struct CommonParams {
 	/// Account start nonce.
 	pub account_start_nonce: U256,
 	/// Maximum size of extra data.
 	pub maximum_extra_data_size: usize,
 	/// Network id.
-	pub network_id: usize,
+	pub network_id: u64,
 	/// Main subprotocol name.
 	pub subprotocol_name: String,
 	/// Minimum gas limit.
@@ -94,8 +93,6 @@ pub struct Spec {
 	pub receipts_root: H256,
 	/// The genesis block's extra data field.
 	pub extra_data: Bytes,
-	/// The number of seal fields in the genesis block.
-	pub seal_fields: usize,
 	/// Each seal field, expressed as RLP, concatenated.
 	pub seal_rlp: Bytes,

@@ -127,7 +124,6 @@ impl From<ethjson::spec::Spec> for Spec {
 			gas_used: g.gas_used,
 			timestamp: g.timestamp,
 			extra_data: g.extra_data,
-			seal_fields: seal.fields,
 			seal_rlp: seal.rlp,
 			state_root_memo: RwLock::new(g.state_root),
 			genesis_state: From::from(s.accounts),
@@ -167,7 +163,7 @@ impl Spec {
 	pub fn nodes(&self) -> &[String] { &self.nodes }

 	/// Get the configured Network ID.
-	pub fn network_id(&self) -> usize { self.params.network_id }
+	pub fn network_id(&self) -> u64 { self.params.network_id }

 	/// Get the configured subprotocol name.
 	pub fn subprotocol_name(&self) -> String { self.params.subprotocol_name.clone() }
@@ -192,13 +188,8 @@ impl Spec {
 		header.set_gas_limit(self.gas_limit.clone());
 		header.set_difficulty(self.difficulty.clone());
 		header.set_seal({
-			let seal = {
-				let mut s = RlpStream::new_list(self.seal_fields);
-				s.append_raw(&self.seal_rlp, self.seal_fields);
-				s.out()
-			};
-			let r = Rlp::new(&seal);
-			(0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect()
+			let r = Rlp::new(&self.seal_rlp);
+			r.iter().map(|f| f.as_raw().to_vec()).collect()
 		});
 		trace!(target: "spec", "Header hash is {}", header.hash());
 		header
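A hedged sketch of the genesis-seal change above, assuming the `rlp` API exactly as it appears in the hunk (`Rlp::new`, `iter`, `as_raw`): because the stored `seal_rlp` is now a single RLP list, the per-field byte vectors can be recovered by iterating that list, and the separate `seal_fields` count becomes unnecessary.

```rust
use rlp::{Rlp, View};

/// Split a genesis seal stored as one RLP list into per-field raw byte vectors.
/// Sketch only; it mirrors the expression used in `genesis_header` above.
fn seal_fields(seal_rlp: &[u8]) -> Vec<Vec<u8>> {
    let r = Rlp::new(seal_rlp);
    r.iter().map(|f| f.as_raw().to_vec()).collect()
}
```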
@@ -227,7 +218,6 @@ impl Spec {
 		self.gas_used = g.gas_used;
 		self.timestamp = g.timestamp;
 		self.extra_data = g.extra_data;
-		self.seal_fields = seal.fields;
 		self.seal_rlp = seal.rlp;
 		self.state_root_memo = RwLock::new(g.state_root);
 	}
@@ -436,6 +436,27 @@ impl Account {
 	}
 }

+// light client storage proof.
+impl Account {
+	/// Prove a storage key's existence or nonexistence in the account's storage
+	/// trie.
+	/// `storage_key` is the hash of the desired storage key, meaning
+	/// this will only work correctly under a secure trie.
+	/// Returns a merkle proof of the storage trie node with all nodes before `from_level`
+	/// omitted.
+	pub fn prove_storage(&self, db: &HashDB, storage_key: H256, from_level: u32) -> Result<Vec<Bytes>, Box<TrieError>> {
+		use util::trie::{Trie, TrieDB};
+		use util::trie::recorder::{Recorder, BasicRecorder as TrieRecorder};
+
+		let mut recorder = TrieRecorder::with_depth(from_level);
+
+		let trie = try!(TrieDB::new(db, &self.storage_root));
+		let _ = try!(trie.get_recorded(&storage_key, &mut recorder));
+
+		Ok(recorder.drain().into_iter().map(|r| r.data).collect())
+	}
+}
+
 impl fmt::Debug for Account {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		write!(f, "{:?}", PodAccount::from_account(self))
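For `prove_storage`, `from_level` lets a caller that already holds the upper part of the trie ask for a shorter proof: the nodes are ordered root first, and everything shallower than `from_level` may be dropped. A std-only illustration of that truncation (dummy byte vectors stand in for real trie nodes):

```rust
// Illustrative sketch of the `from_level` convention used by `prove_storage` above.
fn omit_known_levels(full_proof: Vec<Vec<u8>>, from_level: u32) -> Vec<Vec<u8>> {
    // Keep only the nodes at depth `from_level` and deeper, root-first order preserved.
    full_proof.into_iter().skip(from_level as usize).collect()
}

fn main() {
    let proof = vec![vec![0xaa], vec![0xbb], vec![0xcc]]; // root, branch, leaf
    // A caller that already holds the root node only needs the deeper nodes.
    assert_eq!(omit_known_levels(proof, 1), vec![vec![0xbbu8], vec![0xcc]]);
}
```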
File diff suppressed because it is too large
@@ -286,7 +286,7 @@ fn new_db(path: &str) -> Arc<Database> {
 pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockChain> {
 	let temp = RandomTempPath::new();
 	let db = new_db(temp.as_str());
-	let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
+	let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone(), Spec::new_null().engine);

 	let mut batch = db.transaction();
 	for block_order in 1..block_number {
@@ -304,7 +304,7 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockCh
 pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempResult<BlockChain> {
 	let temp = RandomTempPath::new();
 	let db = new_db(temp.as_str());
-	let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
+	let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone(), Spec::new_null().engine);

 	let mut batch = db.transaction();
@@ -323,7 +323,7 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
 pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
 	let temp = RandomTempPath::new();
 	let db = new_db(temp.as_str());
-	let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone());
+	let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone(), Spec::new_null().engine);

 	GuardedTempResult::<BlockChain> {
 		_temp: temp,
@@ -34,3 +34,4 @@ pub mod block_import_error;
 pub mod restoration_status;
 pub mod snapshot_manifest;
 pub mod mode;
+pub mod pruning_info;
ethcore/src/types/pruning_info.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Information about portions of the state and chain which the client may serve.
+//!
+//! Currently assumes that a client will store everything past a certain point
+//! or everything. Will be extended in the future to support a definition
+//! of which portions of the ancient chain and current state trie are stored as well.
+
+/// Client pruning info. See module-level docs for more details.
+#[derive(Debug, Clone, Binary)]
+pub struct PruningInfo {
+	/// The first block which everything can be served after.
+	pub earliest_chain: u64,
+	/// The first block where state requests may be served.
+	pub earliest_state: u64,
+}
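A standalone sketch of how the new `PruningInfo` might be consumed by a caller deciding whether a request can be served; the struct is mirrored locally (without the `Binary` derive) and the thresholds are example values:

```rust
// Local mirror of the struct introduced above, for illustration only.
#[derive(Debug, Clone)]
pub struct PruningInfo {
    /// The first block which everything can be served after.
    pub earliest_chain: u64,
    /// The first block where state requests may be served.
    pub earliest_state: u64,
}

impl PruningInfo {
    fn can_serve_block(&self, number: u64) -> bool {
        number >= self.earliest_chain
    }
    fn can_serve_state(&self, number: u64) -> bool {
        number >= self.earliest_state
    }
}

fn main() {
    // A pruned node keeps the whole chain but only recent state.
    let pruned = PruningInfo { earliest_chain: 0, earliest_state: 1_200_000 };
    assert!(pruned.can_serve_block(5));
    assert!(!pruned.can_serve_state(5));
    assert!(pruned.can_serve_state(1_500_000));
}
```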
@ -72,7 +72,7 @@ pub struct Transaction {
|
|||||||
|
|
||||||
impl Transaction {
|
impl Transaction {
|
||||||
/// Append object with a without signature into RLP stream
|
/// Append object with a without signature into RLP stream
|
||||||
pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, network_id: Option<u8>) {
|
pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, network_id: Option<u64>) {
|
||||||
s.begin_list(if network_id.is_none() { 6 } else { 9 });
|
s.begin_list(if network_id.is_none() { 6 } else { 9 });
|
||||||
s.append(&self.nonce);
|
s.append(&self.nonce);
|
||||||
s.append(&self.gas_price);
|
s.append(&self.gas_price);
|
||||||
@ -140,26 +140,26 @@ impl From<ethjson::transaction::Transaction> for SignedTransaction {
|
|||||||
|
|
||||||
impl Transaction {
|
impl Transaction {
|
||||||
/// The message hash of the transaction.
|
/// The message hash of the transaction.
|
||||||
pub fn hash(&self, network_id: Option<u8>) -> H256 {
|
pub fn hash(&self, network_id: Option<u64>) -> H256 {
|
||||||
let mut stream = RlpStream::new();
|
let mut stream = RlpStream::new();
|
||||||
self.rlp_append_unsigned_transaction(&mut stream, network_id);
|
self.rlp_append_unsigned_transaction(&mut stream, network_id);
|
||||||
stream.out().sha3()
|
stream.out().sha3()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Signs the transaction as coming from `sender`.
|
/// Signs the transaction as coming from `sender`.
|
||||||
pub fn sign(self, secret: &Secret, network_id: Option<u8>) -> SignedTransaction {
|
pub fn sign(self, secret: &Secret, network_id: Option<u64>) -> SignedTransaction {
|
||||||
let sig = ::ethkey::sign(secret, &self.hash(network_id))
|
let sig = ::ethkey::sign(secret, &self.hash(network_id))
|
||||||
.expect("data is valid and context has signing capabilities; qed");
|
.expect("data is valid and context has signing capabilities; qed");
|
||||||
self.with_signature(sig, network_id)
|
self.with_signature(sig, network_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Signs the transaction with signature.
|
/// Signs the transaction with signature.
|
||||||
pub fn with_signature(self, sig: Signature, network_id: Option<u8>) -> SignedTransaction {
|
pub fn with_signature(self, sig: Signature, network_id: Option<u64>) -> SignedTransaction {
|
||||||
SignedTransaction {
|
SignedTransaction {
|
||||||
unsigned: self,
|
unsigned: self,
|
||||||
r: sig.r().into(),
|
r: sig.r().into(),
|
||||||
s: sig.s().into(),
|
s: sig.s().into(),
|
||||||
v: sig.v() + if let Some(n) = network_id { 35 + n * 2 } else { 27 },
|
v: sig.v() as u64 + if let Some(n) = network_id { 35 + n * 2 } else { 27 },
|
||||||
hash: Cell::new(None),
|
hash: Cell::new(None),
|
||||||
sender: Cell::new(None),
|
sender: Cell::new(None),
|
||||||
}
|
}
|
||||||
@ -211,7 +211,7 @@ pub struct SignedTransaction {
|
|||||||
unsigned: Transaction,
|
unsigned: Transaction,
|
||||||
/// The V field of the signature; the LS bit describes which half of the curve our point falls
|
/// The V field of the signature; the LS bit describes which half of the curve our point falls
|
||||||
/// in. The MS bits describe which network this transaction is for. If 27/28, it's for all networks.
|
/// in. The MS bits describe which network this transaction is for. If 27/28, it's for all networks.
|
||||||
v: u8,
|
v: u64,
|
||||||
/// The R field of the signature; helps describe the point on the curve.
|
/// The R field of the signature; helps describe the point on the curve.
|
||||||
r: U256,
|
r: U256,
|
||||||
/// The S field of the signature; helps describe the point on the curve.
|
/// The S field of the signature; helps describe the point on the curve.
|
||||||
@ -302,10 +302,13 @@ impl SignedTransaction {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// 0 if `v` would have been 27 under "Electrum" notation, 1 if 28, or 4 if invalid.
|
/// 0 if `v` would have been 27 under "Electrum" notation, 1 if 28, or 4 if invalid.
|
||||||
pub fn standard_v(&self) -> u8 { match self.v { v if v == 27 || v == 28 || v > 36 => (v - 1) % 2, _ => 4 } }
|
pub fn standard_v(&self) -> u8 { match self.v { v if v == 27 || v == 28 || v > 36 => ((v - 1) % 2) as u8, _ => 4 } }
|
||||||
|
|
||||||
|
/// The `v` value that appears in the RLP.
|
||||||
|
pub fn original_v(&self) -> u64 { self.v }
|
||||||
|
|
||||||
/// The network ID, or `None` if this is a global transaction.
|
/// The network ID, or `None` if this is a global transaction.
|
||||||
pub fn network_id(&self) -> Option<u8> {
|
pub fn network_id(&self) -> Option<u64> {
|
||||||
match self.v {
|
match self.v {
|
||||||
v if v > 36 => Some((v - 35) / 2),
|
v if v > 36 => Some((v - 35) / 2),
|
||||||
_ => None,
|
_ => None,
|
||||||
|
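`standard_v` maps the extended `v` back to the 0/1 recovery id expected by the signature library; `(v - 1) % 2` happens to work for both the legacy 27/28 values and the network-scoped ones. A small self-check of that identity (a sketch, not from the diff):

```rust
// Normalise an extended `v` to the 0/1 recovery id, or 4 if the value
// is not a recognised encoding; mirrors `standard_v` above.
fn standard_v(v: u64) -> u8 {
    match v {
        v if v == 27 || v == 28 || v > 36 => ((v - 1) % 2) as u8,
        _ => 4,
    }
}

fn main() {
    assert_eq!(standard_v(27), 0); // legacy, even recovery id
    assert_eq!(standard_v(28), 1); // legacy, odd recovery id
    assert_eq!(standard_v(37), 0); // network_id 1, recovery id 0
    assert_eq!(standard_v(38), 1); // network_id 1, recovery id 1
    assert_eq!(standard_v(30), 4); // not a valid encoding
}
```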
@ -17,7 +17,7 @@
|
|||||||
//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
|
//! A queue of blocks. Sits between network or other I/O and the `BlockChain`.
|
||||||
//! Sorts them ready for blockchain insertion.
|
//! Sorts them ready for blockchain insertion.
|
||||||
|
|
||||||
use std::thread::{JoinHandle, self};
|
use std::thread::{self, JoinHandle};
|
||||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering};
|
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering};
|
||||||
use std::sync::{Condvar as SCondvar, Mutex as SMutex};
|
use std::sync::{Condvar as SCondvar, Mutex as SMutex};
|
||||||
use util::*;
|
use util::*;
|
||||||
@ -53,6 +53,8 @@ pub struct Config {
|
|||||||
/// Maximum heap memory to use.
|
/// Maximum heap memory to use.
|
||||||
/// When the limit is reached, `is_full` returns `true`.
|
/// When the limit is reached, `is_full` returns `true`.
|
||||||
pub max_mem_use: usize,
|
pub max_mem_use: usize,
|
||||||
|
/// Settings for the number of verifiers and adaptation strategy.
|
||||||
|
pub verifier_settings: VerifierSettings,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for Config {
|
impl Default for Config {
|
||||||
@ -60,39 +62,35 @@ impl Default for Config {
|
|||||||
Config {
|
Config {
|
||||||
max_queue_size: 30000,
|
max_queue_size: 30000,
|
||||||
max_mem_use: 50 * 1024 * 1024,
|
max_mem_use: 50 * 1024 * 1024,
|
||||||
|
verifier_settings: VerifierSettings::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct VerifierHandle {
|
/// Verifier settings.
|
||||||
deleting: Arc<AtomicBool>,
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
sleep: Arc<AtomicBool>,
|
pub struct VerifierSettings {
|
||||||
thread: JoinHandle<()>,
|
/// Whether to scale the number of verifiers according to load.
|
||||||
|
// TODO: replace with a strategy enum?
|
||||||
|
pub scale_verifiers: bool,
|
||||||
|
/// Initial number of verifiers.
|
||||||
|
pub num_verifiers: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl VerifierHandle {
|
impl Default for VerifierSettings {
|
||||||
// signal to the verifier thread that it should sleep.
|
fn default() -> Self {
|
||||||
fn sleep(&self) {
|
VerifierSettings {
|
||||||
self.sleep.store(true, AtomicOrdering::SeqCst);
|
scale_verifiers: false,
|
||||||
|
num_verifiers: MAX_VERIFIERS,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// signal to the verifier thread that it should wake up.
|
// pool states
|
||||||
fn wake_up(&self) {
|
enum State {
|
||||||
self.sleep.store(false, AtomicOrdering::SeqCst);
|
// all threads with id < inner value are to work.
|
||||||
self.thread.thread().unpark();
|
Work(usize),
|
||||||
}
|
Exit,
|
||||||
|
|
||||||
// signal to the verifier thread that it should conclude its
|
|
||||||
// operations.
|
|
||||||
fn conclude(&self) {
|
|
||||||
self.wake_up();
|
|
||||||
self.deleting.store(true, AtomicOrdering::Release);
|
|
||||||
}
|
|
||||||
|
|
||||||
// join the verifier thread.
|
|
||||||
fn join(self) {
|
|
||||||
self.thread.join().expect("Verifier thread panicked");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
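`VerifierSettings` replaces the hard-wired verifier pool with two knobs: whether the pool may be resized at runtime and how many threads start active. A sketch of opting in from a caller (the chosen numbers are illustrative; `Config` and `VerifierSettings` are the types introduced above):

```rust
// Sketch only: build a queue config that starts with two active verifiers
// and lets `collect_garbage` grow or shrink the pool with load.
fn scaling_config() -> Config {
    let mut config = Config::default();
    config.verifier_settings = VerifierSettings {
        scale_verifiers: true,
        num_verifiers: 2,
    };
    config
}
```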
/// An item which is in the process of being verified.
|
/// An item which is in the process of being verified.
|
||||||
@ -131,7 +129,6 @@ pub struct VerificationQueue<K: Kind> {
|
|||||||
engine: Arc<Engine>,
|
engine: Arc<Engine>,
|
||||||
more_to_verify: Arc<SCondvar>,
|
more_to_verify: Arc<SCondvar>,
|
||||||
verification: Arc<Verification<K>>,
|
verification: Arc<Verification<K>>,
|
||||||
verifiers: Mutex<(Vec<VerifierHandle>, usize)>,
|
|
||||||
deleting: Arc<AtomicBool>,
|
deleting: Arc<AtomicBool>,
|
||||||
ready_signal: Arc<QueueSignal>,
|
ready_signal: Arc<QueueSignal>,
|
||||||
empty: Arc<SCondvar>,
|
empty: Arc<SCondvar>,
|
||||||
@ -139,6 +136,9 @@ pub struct VerificationQueue<K: Kind> {
|
|||||||
ticks_since_adjustment: AtomicUsize,
|
ticks_since_adjustment: AtomicUsize,
|
||||||
max_queue_size: usize,
|
max_queue_size: usize,
|
||||||
max_mem_use: usize,
|
max_mem_use: usize,
|
||||||
|
scale_verifiers: bool,
|
||||||
|
verifier_handles: Vec<JoinHandle<()>>,
|
||||||
|
state: Arc<(Mutex<State>, Condvar)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
struct QueueSignal {
|
struct QueueSignal {
|
||||||
@ -221,43 +221,45 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
});
|
});
|
||||||
let empty = Arc::new(SCondvar::new());
|
let empty = Arc::new(SCondvar::new());
|
||||||
let panic_handler = PanicHandler::new_in_arc();
|
let panic_handler = PanicHandler::new_in_arc();
|
||||||
|
let scale_verifiers = config.verifier_settings.scale_verifiers;
|
||||||
|
|
||||||
let max_verifiers = min(::num_cpus::get(), MAX_VERIFIERS);
|
let num_cpus = ::num_cpus::get();
|
||||||
let default_amount = max(::num_cpus::get(), 3) - 2;
|
let max_verifiers = min(num_cpus, MAX_VERIFIERS);
|
||||||
let mut verifiers = Vec::with_capacity(max_verifiers);
|
let default_amount = max(1, min(max_verifiers, config.verifier_settings.num_verifiers));
|
||||||
|
let state = Arc::new((Mutex::new(State::Work(default_amount)), Condvar::new()));
|
||||||
|
let mut verifier_handles = Vec::with_capacity(max_verifiers);
|
||||||
|
|
||||||
debug!(target: "verification", "Allocating {} verifiers, {} initially active", max_verifiers, default_amount);
|
debug!(target: "verification", "Allocating {} verifiers, {} initially active", max_verifiers, default_amount);
|
||||||
|
debug!(target: "verification", "Verifier auto-scaling {}", if scale_verifiers { "enabled" } else { "disabled" });
|
||||||
|
|
||||||
for i in 0..max_verifiers {
|
for i in 0..max_verifiers {
|
||||||
debug!(target: "verification", "Adding verification thread #{}", i);
|
debug!(target: "verification", "Adding verification thread #{}", i);
|
||||||
|
|
||||||
let deleting = deleting.clone();
|
|
||||||
let panic_handler = panic_handler.clone();
|
let panic_handler = panic_handler.clone();
|
||||||
let verification = verification.clone();
|
let verification = verification.clone();
|
||||||
let engine = engine.clone();
|
let engine = engine.clone();
|
||||||
let wait = more_to_verify.clone();
|
let wait = more_to_verify.clone();
|
||||||
let ready = ready_signal.clone();
|
let ready = ready_signal.clone();
|
||||||
let empty = empty.clone();
|
let empty = empty.clone();
|
||||||
|
let state = state.clone();
|
||||||
|
|
||||||
// enable only the first few verifiers.
|
let handle = thread::Builder::new()
|
||||||
let sleep = if i < default_amount {
|
.name(format!("Verifier #{}", i))
|
||||||
Arc::new(AtomicBool::new(false))
|
.spawn(move || {
|
||||||
} else {
|
panic_handler.catch_panic(move || {
|
||||||
Arc::new(AtomicBool::new(true))
|
VerificationQueue::verify(
|
||||||
};
|
verification,
|
||||||
|
engine,
|
||||||
verifiers.push(VerifierHandle {
|
wait,
|
||||||
deleting: deleting.clone(),
|
ready,
|
||||||
sleep: sleep.clone(),
|
empty,
|
||||||
thread: thread::Builder::new()
|
state,
|
||||||
.name(format!("Verifier #{}", i))
|
i,
|
||||||
.spawn(move || {
|
)
|
||||||
panic_handler.catch_panic(move || {
|
}).unwrap()
|
||||||
VerificationQueue::verify(verification, engine, wait, ready, deleting, empty, sleep)
|
})
|
||||||
}).unwrap()
|
.expect("Failed to create verifier thread.");
|
||||||
})
|
verifier_handles.push(handle);
|
||||||
.expect("Failed to create verifier thread.")
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
VerificationQueue {
|
VerificationQueue {
|
||||||
@ -266,13 +268,15 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
ready_signal: ready_signal,
|
ready_signal: ready_signal,
|
||||||
more_to_verify: more_to_verify,
|
more_to_verify: more_to_verify,
|
||||||
verification: verification,
|
verification: verification,
|
||||||
verifiers: Mutex::new((verifiers, default_amount)),
|
|
||||||
deleting: deleting,
|
deleting: deleting,
|
||||||
processing: RwLock::new(HashSet::new()),
|
processing: RwLock::new(HashSet::new()),
|
||||||
empty: empty,
|
empty: empty,
|
||||||
ticks_since_adjustment: AtomicUsize::new(0),
|
ticks_since_adjustment: AtomicUsize::new(0),
|
||||||
max_queue_size: max(config.max_queue_size, MIN_QUEUE_LIMIT),
|
max_queue_size: max(config.max_queue_size, MIN_QUEUE_LIMIT),
|
||||||
max_mem_use: max(config.max_mem_use, MIN_MEM_LIMIT),
|
max_mem_use: max(config.max_mem_use, MIN_MEM_LIMIT),
|
||||||
|
scale_verifiers: scale_verifiers,
|
||||||
|
verifier_handles: verifier_handles,
|
||||||
|
state: state,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
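With the custom `VerifierHandle` gone, each verifier is just a named thread whose `JoinHandle` is kept for shutdown. The shape of that spawn, reduced to standard-library calls (a sketch, not the queue's actual body):

```rust
use std::thread;

fn main() {
    // Spawn named worker threads and keep the handles so they can be
    // joined later, mirroring the `Verifier #N` naming used above.
    let handles: Vec<thread::JoinHandle<()>> = (0..2)
        .map(|i| {
            thread::Builder::new()
                .name(format!("Verifier #{}", i))
                .spawn(move || {
                    // the verification loop would run here
                })
                .expect("Failed to create verifier thread.")
        })
        .collect();

    for handle in handles {
        handle.join().expect("verifier thread panicked");
    }
}
```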
@ -281,23 +285,30 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
engine: Arc<Engine>,
|
engine: Arc<Engine>,
|
||||||
wait: Arc<SCondvar>,
|
wait: Arc<SCondvar>,
|
||||||
ready: Arc<QueueSignal>,
|
ready: Arc<QueueSignal>,
|
||||||
deleting: Arc<AtomicBool>,
|
|
||||||
empty: Arc<SCondvar>,
|
empty: Arc<SCondvar>,
|
||||||
sleep: Arc<AtomicBool>,
|
state: Arc<(Mutex<State>, Condvar)>,
|
||||||
|
id: usize,
|
||||||
) {
|
) {
|
||||||
while !deleting.load(AtomicOrdering::Acquire) {
|
loop {
|
||||||
|
// check current state.
|
||||||
{
|
{
|
||||||
while sleep.load(AtomicOrdering::SeqCst) {
|
let mut cur_state = state.0.lock();
|
||||||
trace!(target: "verification", "Verifier sleeping");
|
while let State::Work(x) = *cur_state {
|
||||||
::std::thread::park();
|
// sleep until this thread is required.
|
||||||
trace!(target: "verification", "Verifier waking up");
|
if id < x { break }
|
||||||
|
|
||||||
if deleting.load(AtomicOrdering::Acquire) {
|
debug!(target: "verification", "verifier {} sleeping", id);
|
||||||
return;
|
state.1.wait(&mut cur_state);
|
||||||
}
|
debug!(target: "verification", "verifier {} waking up", id);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let State::Exit = *cur_state {
|
||||||
|
debug!(target: "verification", "verifier {} exiting", id);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// wait for work if empty.
|
||||||
{
|
{
|
||||||
let mut more_to_verify = verification.more_to_verify.lock().unwrap();
|
let mut more_to_verify = verification.more_to_verify.lock().unwrap();
|
||||||
|
|
||||||
@ -305,15 +316,22 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
empty.notify_all();
|
empty.notify_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
while verification.unverified.lock().is_empty() && !deleting.load(AtomicOrdering::Acquire) {
|
while verification.unverified.lock().is_empty() {
|
||||||
|
if let State::Exit = *state.0.lock() {
|
||||||
|
debug!(target: "verification", "verifier {} exiting", id);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
more_to_verify = wait.wait(more_to_verify).unwrap();
|
more_to_verify = wait.wait(more_to_verify).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
if deleting.load(AtomicOrdering::Acquire) {
|
if let State::Exit = *state.0.lock() {
|
||||||
|
debug!(target: "verification", "verifier {} exiting", id);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// do work.
|
||||||
let item = {
|
let item = {
|
||||||
// acquire these locks before getting the item to verify.
|
// acquire these locks before getting the item to verify.
|
||||||
let mut unverified = verification.unverified.lock();
|
let mut unverified = verification.unverified.lock();
|
||||||
@ -568,6 +586,14 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get the current number of working verifiers.
|
||||||
|
pub fn num_verifiers(&self) -> usize {
|
||||||
|
match *self.state.0.lock() {
|
||||||
|
State::Work(x) => x,
|
||||||
|
State::Exit => panic!("state only set to exit on drop; queue live now; qed"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
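All sleeping, waking and shutdown now goes through the single shared `(Mutex<State>, Condvar)` pair: `Work(n)` means workers with `id < n` run, everyone else waits on the condvar, and `Exit` ends every thread. A minimal standalone sketch of that pattern using `std` primitives (the real code uses the crate's own `Mutex`/`Condvar` wrappers):

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Clone, Copy)]
enum State { Work(usize), Exit }

fn main() {
    // Shared pool state: `Work(n)` activates workers with id < n.
    let state = Arc::new((Mutex::new(State::Work(1)), Condvar::new()));

    let handles: Vec<_> = (0..4).map(|id| {
        let state = state.clone();
        thread::spawn(move || loop {
            let mut cur = state.0.lock().unwrap();
            // Sleep while this worker is outside the active set.
            while let State::Work(active) = *cur {
                if id < active { break }
                cur = state.1.wait(cur).unwrap();
            }
            if let State::Exit = *cur { return }
            drop(cur);
            // Stand-in for one unit of verification work.
            thread::sleep(Duration::from_millis(1));
        })
    }).collect();

    // Scale the pool up, then shut it down; workers see both via the condvar.
    *state.0.lock().unwrap() = State::Work(3);
    state.1.notify_all();
    *state.0.lock().unwrap() = State::Exit;
    state.1.notify_all();
    for handle in handles { handle.join().unwrap(); }
}
```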
/// Optimise memory footprint of the heap fields, and adjust the number of threads
|
/// Optimise memory footprint of the heap fields, and adjust the number of threads
|
||||||
/// to better suit the workload.
|
/// to better suit the workload.
|
||||||
pub fn collect_garbage(&self) {
|
pub fn collect_garbage(&self) {
|
||||||
@ -598,13 +624,15 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
|
|
||||||
self.processing.write().shrink_to_fit();
|
self.processing.write().shrink_to_fit();
|
||||||
|
|
||||||
|
if !self.scale_verifiers { return }
|
||||||
|
|
||||||
if self.ticks_since_adjustment.fetch_add(1, AtomicOrdering::SeqCst) + 1 >= READJUSTMENT_PERIOD {
|
if self.ticks_since_adjustment.fetch_add(1, AtomicOrdering::SeqCst) + 1 >= READJUSTMENT_PERIOD {
|
||||||
self.ticks_since_adjustment.store(0, AtomicOrdering::SeqCst);
|
self.ticks_since_adjustment.store(0, AtomicOrdering::SeqCst);
|
||||||
} else {
|
} else {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
let current = self.verifiers.lock().1;
|
let current = self.num_verifiers();
|
||||||
|
|
||||||
let diff = (v_len - u_len).abs();
|
let diff = (v_len - u_len).abs();
|
||||||
let total = v_len + u_len;
|
let total = v_len + u_len;
|
||||||
@ -626,27 +654,14 @@ impl<K: Kind> VerificationQueue<K> {
|
|||||||
// possible, never going over the amount of initially allocated threads
|
// possible, never going over the amount of initially allocated threads
|
||||||
// or below 1.
|
// or below 1.
|
||||||
fn scale_verifiers(&self, target: usize) {
|
fn scale_verifiers(&self, target: usize) {
|
||||||
let mut verifiers = self.verifiers.lock();
|
let current = self.num_verifiers();
|
||||||
let &mut (ref mut verifiers, ref mut verifier_count) = &mut *verifiers;
|
let target = min(self.verifier_handles.len(), target);
|
||||||
|
|
||||||
let target = min(verifiers.len(), target);
|
|
||||||
let target = max(1, target);
|
let target = max(1, target);
|
||||||
|
|
||||||
debug!(target: "verification", "Scaling from {} to {} verifiers", verifier_count, target);
|
debug!(target: "verification", "Scaling from {} to {} verifiers", current, target);
|
||||||
|
|
||||||
// scaling up
|
*self.state.0.lock() = State::Work(target);
|
||||||
for i in *verifier_count..target {
|
self.state.1.notify_all();
|
||||||
debug!(target: "verification", "Waking up verifier {}", i);
|
|
||||||
verifiers[i].wake_up();
|
|
||||||
}
|
|
||||||
|
|
||||||
// scaling down.
|
|
||||||
for i in target..*verifier_count {
|
|
||||||
debug!(target: "verification", "Putting verifier {} to sleep", i);
|
|
||||||
verifiers[i].sleep();
|
|
||||||
}
|
|
||||||
|
|
||||||
*verifier_count = target;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
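`scale_verifiers` itself now only clamps the requested target and publishes a new `State::Work` value; the workers do their own sleeping and waking. The clamp is the usual floor-of-one, ceiling-of-allocated pattern (a trivial sketch):

```rust
use std::cmp::{max, min};

// Never drop below one active verifier, never exceed the number of
// threads allocated at start-up; mirrors the clamp in `scale_verifiers`.
fn clamp_target(allocated: usize, target: usize) -> usize {
    max(1, min(allocated, target))
}

fn main() {
    assert_eq!(clamp_target(8, 0), 1);
    assert_eq!(clamp_target(8, 5), 5);
    assert_eq!(clamp_target(8, 20), 8);
}
```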
@ -660,22 +675,18 @@ impl<K: Kind> Drop for VerificationQueue<K> {
|
|||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
trace!(target: "shutdown", "[VerificationQueue] Closing...");
|
trace!(target: "shutdown", "[VerificationQueue] Closing...");
|
||||||
self.clear();
|
self.clear();
|
||||||
self.deleting.store(true, AtomicOrdering::Release);
|
self.deleting.store(true, AtomicOrdering::SeqCst);
|
||||||
|
|
||||||
let mut verifiers = self.verifiers.get_mut();
|
// set exit state; should be done before `more_to_verify` notification.
|
||||||
let mut verifiers = &mut verifiers.0;
|
*self.state.0.lock() = State::Exit;
|
||||||
|
self.state.1.notify_all();
|
||||||
// first pass to signal conclusion. must be done before
|
|
||||||
// notify or deadlock possible.
|
|
||||||
for handle in verifiers.iter() {
|
|
||||||
handle.conclude();
|
|
||||||
}
|
|
||||||
|
|
||||||
|
// wake up all threads waiting for more work.
|
||||||
self.more_to_verify.notify_all();
|
self.more_to_verify.notify_all();
|
||||||
|
|
||||||
// second pass to join.
|
// wait for all verifier threads to join.
|
||||||
for handle in verifiers.drain(..) {
|
for thread in self.verifier_handles.drain(..) {
|
||||||
handle.join();
|
thread.join().expect("Propagating verifier thread panic on shutdown");
|
||||||
}
|
}
|
||||||
|
|
||||||
trace!(target: "shutdown", "[VerificationQueue] Closed.");
|
trace!(target: "shutdown", "[VerificationQueue] Closed.");
|
||||||
@ -687,16 +698,21 @@ mod tests {
|
|||||||
use util::*;
|
use util::*;
|
||||||
use io::*;
|
use io::*;
|
||||||
use spec::*;
|
use spec::*;
|
||||||
use super::{BlockQueue, Config};
|
use super::{BlockQueue, Config, State};
|
||||||
use super::kind::blocks::Unverified;
|
use super::kind::blocks::Unverified;
|
||||||
use tests::helpers::*;
|
use tests::helpers::*;
|
||||||
use error::*;
|
use error::*;
|
||||||
use views::*;
|
use views::*;
|
||||||
|
|
||||||
fn get_test_queue() -> BlockQueue {
|
// create a test block queue.
|
||||||
|
// `auto_scale` enables verifier pool adjustment.
|
||||||
|
fn get_test_queue(auto_scale: bool) -> BlockQueue {
|
||||||
let spec = get_test_spec();
|
let spec = get_test_spec();
|
||||||
let engine = spec.engine;
|
let engine = spec.engine;
|
||||||
BlockQueue::new(Config::default(), engine, IoChannel::disconnected(), true)
|
|
||||||
|
let mut config = Config::default();
|
||||||
|
config.verifier_settings.scale_verifiers = auto_scale;
|
||||||
|
BlockQueue::new(config, engine, IoChannel::disconnected(), true)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -709,7 +725,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn can_import_blocks() {
|
fn can_import_blocks() {
|
||||||
let queue = get_test_queue();
|
let queue = get_test_queue(false);
|
||||||
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
|
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
|
||||||
panic!("error importing block that is valid by definition({:?})", e);
|
panic!("error importing block that is valid by definition({:?})", e);
|
||||||
}
|
}
|
||||||
@ -717,7 +733,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn returns_error_for_duplicates() {
|
fn returns_error_for_duplicates() {
|
||||||
let queue = get_test_queue();
|
let queue = get_test_queue(false);
|
||||||
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
|
if let Err(e) = queue.import(Unverified::new(get_good_dummy_block())) {
|
||||||
panic!("error importing block that is valid by definition({:?})", e);
|
panic!("error importing block that is valid by definition({:?})", e);
|
||||||
}
|
}
|
||||||
@ -736,7 +752,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn returns_ok_for_drained_duplicates() {
|
fn returns_ok_for_drained_duplicates() {
|
||||||
let queue = get_test_queue();
|
let queue = get_test_queue(false);
|
||||||
let block = get_good_dummy_block();
|
let block = get_good_dummy_block();
|
||||||
let hash = BlockView::new(&block).header().hash().clone();
|
let hash = BlockView::new(&block).header().hash().clone();
|
||||||
if let Err(e) = queue.import(Unverified::new(block)) {
|
if let Err(e) = queue.import(Unverified::new(block)) {
|
||||||
@ -753,7 +769,7 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn returns_empty_once_finished() {
|
fn returns_empty_once_finished() {
|
||||||
let queue = get_test_queue();
|
let queue = get_test_queue(false);
|
||||||
queue.import(Unverified::new(get_good_dummy_block()))
|
queue.import(Unverified::new(get_good_dummy_block()))
|
||||||
.expect("error importing block that is valid by definition");
|
.expect("error importing block that is valid by definition");
|
||||||
queue.flush();
|
queue.flush();
|
||||||
@ -781,30 +797,23 @@ mod tests {
|
|||||||
fn scaling_limits() {
|
fn scaling_limits() {
|
||||||
use super::MAX_VERIFIERS;
|
use super::MAX_VERIFIERS;
|
||||||
|
|
||||||
let queue = get_test_queue();
|
let queue = get_test_queue(true);
|
||||||
queue.scale_verifiers(MAX_VERIFIERS + 1);
|
queue.scale_verifiers(MAX_VERIFIERS + 1);
|
||||||
|
|
||||||
assert!(queue.verifiers.lock().1 < MAX_VERIFIERS + 1);
|
assert!(queue.num_verifiers() < MAX_VERIFIERS + 1);
|
||||||
|
|
||||||
queue.scale_verifiers(0);
|
queue.scale_verifiers(0);
|
||||||
|
|
||||||
assert!(queue.verifiers.lock().1 == 1);
|
assert!(queue.num_verifiers() == 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn readjust_verifiers() {
|
fn readjust_verifiers() {
|
||||||
let queue = get_test_queue();
|
let queue = get_test_queue(true);
|
||||||
|
|
||||||
// put all the verifiers to sleep to ensure
|
// put all the verifiers to sleep to ensure
|
||||||
// the test isn't timing sensitive.
|
// the test isn't timing sensitive.
|
||||||
let num_verifiers = {
|
*queue.state.0.lock() = State::Work(0);
|
||||||
let verifiers = queue.verifiers.lock();
|
|
||||||
for i in 0..verifiers.1 {
|
|
||||||
verifiers.0[i].sleep();
|
|
||||||
}
|
|
||||||
|
|
||||||
verifiers.1
|
|
||||||
};
|
|
||||||
|
|
||||||
for block in get_good_dummy_block_seq(5000) {
|
for block in get_good_dummy_block_seq(5000) {
|
||||||
queue.import(Unverified::new(block)).expect("Block good by definition; qed");
|
queue.import(Unverified::new(block)).expect("Block good by definition; qed");
|
||||||
@ -812,20 +821,12 @@ mod tests {
|
|||||||
|
|
||||||
// almost all unverified == bump verifier count.
|
// almost all unverified == bump verifier count.
|
||||||
queue.collect_garbage();
|
queue.collect_garbage();
|
||||||
assert_eq!(queue.verifiers.lock().1, num_verifiers + 1);
|
assert_eq!(queue.num_verifiers(), 1);
|
||||||
|
|
||||||
// wake them up again and verify everything.
|
|
||||||
{
|
|
||||||
let verifiers = queue.verifiers.lock();
|
|
||||||
for i in 0..verifiers.1 {
|
|
||||||
verifiers.0[i].wake_up();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
queue.flush();
|
queue.flush();
|
||||||
|
|
||||||
// nothing to verify == use minimum number of verifiers.
|
// nothing to verify == use minimum number of verifiers.
|
||||||
queue.collect_garbage();
|
queue.collect_garbage();
|
||||||
assert_eq!(queue.verifiers.lock().1, 1);
|
assert_eq!(queue.num_verifiers(), 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "evmjit"
|
name = "evmjit"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["debris <marek.kotewicz@gmail.com>"]
|
authors = ["debris <marek.kotewicz@gmail.com>"]
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "ethcore-ipc-codegen"
|
name = "ethcore-ipc-codegen"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["Nikolay Volf"]
|
authors = ["Nikolay Volf"]
|
||||||
license = "GPL-3.0"
|
license = "GPL-3.0"
|
||||||
description = "Macros to auto-generate implementations for ipc call"
|
description = "Macros to auto-generate implementations for ipc call"
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "ethcore-ipc-nano"
|
name = "ethcore-ipc-nano"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["Nikolay Volf <nikolay@ethcore.io>"]
|
authors = ["Nikolay Volf <nikolay@ethcore.io>"]
|
||||||
license = "GPL-3.0"
|
license = "GPL-3.0"
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "ethcore-ipc"
|
name = "ethcore-ipc"
|
||||||
version = "1.4.0"
|
version = "1.5.0"
|
||||||
authors = ["Nikolay Volf <nikvolf@gmail.com>"]
|
authors = ["Nikolay Volf <nikvolf@gmail.com>"]
|
||||||
license = "GPL-3.0"
|
license = "GPL-3.0"
|
||||||
|
|
||||||
|
@ -7,6 +7,7 @@
|
|||||||
"transform-runtime",
|
"transform-runtime",
|
||||||
"transform-decorators-legacy",
|
"transform-decorators-legacy",
|
||||||
"transform-class-properties",
|
"transform-class-properties",
|
||||||
|
"transform-object-rest-spread",
|
||||||
"lodash"
|
"lodash"
|
||||||
],
|
],
|
||||||
"retainLines": true,
|
"retainLines": true,
|
||||||
|
4 js/assets/images/certifications/unknown.svg Normal file
@ -0,0 +1,4 @@
|
|||||||
|
<svg width="100" height="100" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg">
|
||||||
|
<circle fill="#4A90E2" cx="50" cy="50" r="50"/>
|
||||||
|
<path d="M20 45 L10 55 L35 85 L90 35 L80 25 L36 65 z" fill="#FFF"/>
|
||||||
|
</svg>
|
After: 100×100 SVG icon, 213 B
234 js/package.json
@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "parity.js",
|
"name": "parity.js",
|
||||||
"version": "0.2.78",
|
"version": "0.2.102",
|
||||||
"main": "release/index.js",
|
"main": "release/index.js",
|
||||||
"jsnext:main": "src/index.js",
|
"jsnext:main": "src/index.js",
|
||||||
"author": "Parity Team <admin@parity.io>",
|
"author": "Parity Team <admin@parity.io>",
|
||||||
@ -47,132 +47,128 @@
|
|||||||
"prepush": "npm run lint:cached"
|
"prepush": "npm run lint:cached"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"babel-cli": "~6.18.0",
|
"babel-cli": "6.18.0",
|
||||||
"babel-core": "~6.18.2",
|
"babel-core": "6.20.0",
|
||||||
"babel-eslint": "~7.1.0",
|
"babel-eslint": "7.1.1",
|
||||||
"babel-loader": "~6.2.3",
|
"babel-loader": "6.2.8",
|
||||||
"babel-plugin-lodash": "~3.2.2",
|
"babel-plugin-lodash": "3.2.10",
|
||||||
"babel-plugin-transform-class-properties": "~6.19.0",
|
"babel-plugin-transform-class-properties": "6.18.0",
|
||||||
"babel-plugin-transform-decorators-legacy": "~1.3.4",
|
"babel-plugin-transform-decorators-legacy": "1.3.4",
|
||||||
"babel-plugin-transform-react-remove-prop-types": "~0.2.9",
|
"babel-plugin-transform-object-rest-spread": "6.20.2",
|
||||||
"babel-plugin-transform-runtime": "~6.15.0",
|
"babel-plugin-transform-react-remove-prop-types": "0.2.11",
|
||||||
"babel-polyfill": "~6.16.0",
|
"babel-plugin-transform-runtime": "6.15.0",
|
||||||
"babel-preset-es2015": "~6.18.0",
|
"babel-polyfill": "6.20.0",
|
||||||
"babel-preset-es2015-rollup": "~1.2.0",
|
"babel-preset-es2015": "6.18.0",
|
||||||
"babel-preset-es2016": "~6.16.0",
|
"babel-preset-es2016": "6.16.0",
|
||||||
"babel-preset-es2017": "~6.16.0",
|
"babel-preset-es2017": "6.16.0",
|
||||||
"babel-preset-react": "~6.16.0",
|
"babel-preset-react": "6.16.0",
|
||||||
"babel-preset-stage-0": "~6.16.0",
|
"babel-preset-stage-0": "6.16.0",
|
||||||
"babel-register": "6.18.0",
|
"babel-register": "6.18.0",
|
||||||
"babel-runtime": "~6.18.0",
|
"babel-runtime": "6.20.0",
|
||||||
"chai": "~3.5.0",
|
"chai": "3.5.0",
|
||||||
"chai-enzyme": "0.4.2",
|
"chai-enzyme": "0.6.1",
|
||||||
"cheerio": "0.20.0",
|
"circular-dependency-plugin": "2.0.0",
|
||||||
"copy-webpack-plugin": "~4.0.0",
|
"copy-webpack-plugin": "4.0.1",
|
||||||
"core-js": "~2.4.1",
|
"core-js": "2.4.1",
|
||||||
"coveralls": "~2.11.11",
|
"coveralls": "2.11.15",
|
||||||
"css-loader": "~0.26.0",
|
"css-loader": "0.26.1",
|
||||||
"enzyme": "2.3.0",
|
"ejs-loader": "0.3.0",
|
||||||
"eslint": "~3.10.2",
|
"enzyme": "2.6.0",
|
||||||
"eslint-config-semistandard": "~7.0.0",
|
"eslint": "3.11.1",
|
||||||
"eslint-config-standard": "~6.2.1",
|
"eslint-config-semistandard": "7.0.0",
|
||||||
"eslint-config-standard-react": "~4.2.0",
|
"eslint-config-standard": "6.2.1",
|
||||||
"eslint-plugin-promise": "~3.4.0",
|
"eslint-config-standard-react": "4.2.0",
|
||||||
"eslint-plugin-react": "~6.7.1",
|
"eslint-plugin-promise": "3.4.0",
|
||||||
"eslint-plugin-standard": "~2.0.0",
|
"eslint-plugin-react": "6.7.1",
|
||||||
"express": "~4.14.0",
|
"eslint-plugin-standard": "2.0.1",
|
||||||
|
"express": "4.14.0",
|
||||||
"extract-loader": "0.1.0",
|
"extract-loader": "0.1.0",
|
||||||
"extract-text-webpack-plugin": "~2.0.0-beta.4",
|
"extract-text-webpack-plugin": "2.0.0-beta.4",
|
||||||
"file-loader": "~0.9.0",
|
"file-loader": "0.9.0",
|
||||||
"fs-extra": "~0.30.0",
|
"happypack": "3.0.0",
|
||||||
"happypack": "~3.0.0",
|
"html-loader": "0.4.4",
|
||||||
"history": "~2.0.0",
|
"html-webpack-plugin": "2.24.1",
|
||||||
"html-loader": "~0.4.4",
|
"http-proxy-middleware": "0.17.2",
|
||||||
"html-webpack-plugin": "~2.24.1",
|
"husky": "0.11.9",
|
||||||
"http-proxy-middleware": "~0.17.2",
|
"ignore-styles": "5.0.1",
|
||||||
"husky": "~0.11.9",
|
"image-webpack-loader": "3.0.0",
|
||||||
"ignore-styles": "2.0.0",
|
"istanbul": "1.0.0-alpha.2",
|
||||||
"image-webpack-loader": "~3.0.0",
|
"jsdom": "9.8.3",
|
||||||
"istanbul": "~1.0.0-alpha.2",
|
"json-loader": "0.5.4",
|
||||||
"jsdom": "9.2.1",
|
"mocha": "3.2.0",
|
||||||
"json-loader": "~0.5.4",
|
|
||||||
"mocha": "~3.0.0-1",
|
|
||||||
"mock-local-storage": "1.0.2",
|
"mock-local-storage": "1.0.2",
|
||||||
"mock-socket": "~3.0.1",
|
"mock-socket": "6.0.3",
|
||||||
"nock": "~8.0.0",
|
"nock": "9.0.2",
|
||||||
"postcss-import": "8.1.0",
|
"postcss-import": "8.1.0",
|
||||||
"postcss-loader": "~1.1.1",
|
"postcss-loader": "1.1.1",
|
||||||
"postcss-nested": "~1.0.0",
|
"postcss-nested": "1.0.0",
|
||||||
"postcss-simple-vars": "~3.0.0",
|
"postcss-simple-vars": "3.0.0",
|
||||||
"progress": "~1.1.8",
|
"progress": "1.1.8",
|
||||||
"raw-loader": "~0.5.1",
|
"raw-loader": "0.5.1",
|
||||||
"react-addons-perf": "~15.3.2",
|
"react-addons-perf": "15.4.1",
|
||||||
"react-addons-test-utils": "~15.3.2",
|
"react-addons-test-utils": "15.4.1",
|
||||||
"react-dom": "~15.3.2",
|
"react-hot-loader": "3.0.0-beta.6",
|
||||||
"react-hot-loader": "~3.0.0-beta.6",
|
"rucksack-css": "0.9.1",
|
||||||
"rucksack-css": "~0.8.6",
|
"sinon": "1.17.6",
|
||||||
"sinon": "~1.17.4",
|
"sinon-as-promised": "4.0.2",
|
||||||
"sinon-as-promised": "~4.0.2",
|
"sinon-chai": "2.8.0",
|
||||||
"sinon-chai": "~2.8.0",
|
"style-loader": "0.13.1",
|
||||||
"style-loader": "~0.13.0",
|
"url-loader": "0.5.7",
|
||||||
"url-loader": "~0.5.7",
|
"webpack": "2.1.0-beta.27",
|
||||||
"webpack": "~2.1.0-beta.27",
|
"webpack-dev-middleware": "1.8.4",
|
||||||
"webpack-dev-middleware": "~1.8.4",
|
|
||||||
"webpack-error-notification": "0.1.6",
|
"webpack-error-notification": "0.1.6",
|
||||||
"webpack-hot-middleware": "~2.13.2",
|
"webpack-hot-middleware": "2.13.2",
|
||||||
"websocket": "~1.0.23"
|
"websocket": "1.0.23"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"bignumber.js": "~2.3.0",
|
"bignumber.js": "3.0.1",
|
||||||
"blockies": "0.0.2",
|
"blockies": "0.0.2",
|
||||||
"brace": "~0.9.0",
|
"brace": "0.9.0",
|
||||||
"bytes": "~2.4.0",
|
"bytes": "2.4.0",
|
||||||
"chart.js": "~2.3.0",
|
"es6-error": "4.0.0",
|
||||||
"es6-error": "~4.0.0",
|
"es6-promise": "4.0.5",
|
||||||
"es6-promise": "~3.2.1",
|
"ethereumjs-tx": "1.1.4",
|
||||||
"ethereumjs-tx": "~1.1.2",
|
"eventemitter3": "2.0.2",
|
||||||
"eventemitter3": "~2.0.2",
|
"file-saver": "1.3.3",
|
||||||
"file-saver": "~1.3.3",
|
"format-json": "1.0.3",
|
||||||
"format-json": "~1.0.3",
|
"format-number": "2.0.1",
|
||||||
"format-number": "~2.0.1",
|
"geopattern": "1.2.3",
|
||||||
"geopattern": "~1.2.3",
|
"isomorphic-fetch": "2.2.1",
|
||||||
"isomorphic-fetch": "~2.2.1",
|
"js-sha3": "0.5.5",
|
||||||
"js-sha3": "~0.5.2",
|
"lodash": "4.17.2",
|
||||||
"lodash": "~4.11.1",
|
"marked": "0.3.6",
|
||||||
"marked": "~0.3.6",
|
"material-ui": "0.16.4",
|
||||||
"material-ui": "0.16.1",
|
"material-ui-chip-input": "0.11.1",
|
||||||
"material-ui-chip-input": "~0.8.0",
|
"mobx": "2.6.4",
|
||||||
"mobx": "~2.6.1",
|
"mobx-react": "4.0.3",
|
||||||
"mobx-react": "~3.5.8",
|
"mobx-react-devtools": "4.2.10",
|
||||||
"mobx-react-devtools": "~4.2.9",
|
"moment": "2.17.0",
|
||||||
"moment": "~2.14.1",
|
"phoneformat.js": "1.0.3",
|
||||||
"phoneformat.js": "~1.0.3",
|
"qs": "6.3.0",
|
||||||
"qs": "~6.3.0",
|
"react": "15.4.1",
|
||||||
"react": "~15.3.2",
|
"react-ace": "4.1.0",
|
||||||
"react-ace": "~4.0.0",
|
"react-addons-css-transition-group": "15.4.1",
|
||||||
"react-addons-css-transition-group": "~15.3.2",
|
"react-copy-to-clipboard": "4.2.3",
|
||||||
"react-chartjs-2": "~1.5.0",
|
"react-dom": "15.4.1",
|
||||||
"react-copy-to-clipboard": "~4.2.3",
|
"react-dropzone": "3.7.3",
|
||||||
"react-dom": "~15.3.2",
|
"react-redux": "4.4.6",
|
||||||
"react-dropzone": "~3.7.3",
|
"react-router": "3.0.0",
|
||||||
"react-redux": "~4.4.5",
|
"react-router-redux": "4.0.7",
|
||||||
"react-router": "~2.6.1",
|
"react-tap-event-plugin": "2.0.1",
|
||||||
"react-router-redux": "~4.0.5",
|
"react-tooltip": "3.2.2",
|
||||||
"react-tap-event-plugin": "~1.0.0",
|
"recharts": "0.15.2",
|
||||||
"react-tooltip": "~2.0.3",
|
"redux": "3.6.0",
|
||||||
"recharts": "~0.15.2",
|
"redux-actions": "1.1.0",
|
||||||
"redux": "~3.5.2",
|
"redux-thunk": "2.1.0",
|
||||||
"redux-actions": "~0.10.1",
|
"rlp": "2.0.0",
|
||||||
"redux-thunk": "~2.1.0",
|
"scryptsy": "2.0.0",
|
||||||
"rlp": "~2.0.0",
|
|
||||||
"scryptsy": "~2.0.0",
|
|
||||||
"solc": "ngotchac/solc-js",
|
"solc": "ngotchac/solc-js",
|
||||||
"store": "~1.3.20",
|
"store": "1.3.20",
|
||||||
"utf8": "~2.1.1",
|
"utf8": "2.1.2",
|
||||||
"valid-url": "~1.0.9",
|
"valid-url": "1.0.9",
|
||||||
"validator": "~5.7.0",
|
"validator": "6.2.0",
|
||||||
"web3": "~0.17.0-beta",
|
"web3": "0.17.0-beta",
|
||||||
"whatwg-fetch": "~1.0.0",
|
"whatwg-fetch": "2.0.1",
|
||||||
"worker-loader": "~0.7.1"
|
"worker-loader": "0.7.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -34,11 +34,18 @@ git fetch origin 2>$GITLOG
|
|||||||
git checkout -b $BRANCH
|
git checkout -b $BRANCH
|
||||||
|
|
||||||
echo "*** Committing compiled files for $UTCDATE"
|
echo "*** Committing compiled files for $UTCDATE"
|
||||||
|
mv build ../build.new
|
||||||
git add .
|
git add .
|
||||||
git commit -m "$UTCDATE"
|
git commit -m "$UTCDATE [update]"
|
||||||
|
git merge origin/$BRANCH -X ours --commit -m "$UTCDATE [merge]"
|
||||||
|
git rm -r build
|
||||||
|
rm -rf build
|
||||||
|
git commit -m "$UTCDATE [cleanup]"
|
||||||
|
mv ../build.new build
|
||||||
|
git add .
|
||||||
|
git commit -m "$UTCDATE [release]"
|
||||||
|
|
||||||
echo "*** Merging remote"
|
echo "*** Merging remote"
|
||||||
git merge origin/$BRANCH -X ours --commit -m "$UTCDATE [release]"
|
|
||||||
git push origin HEAD:refs/heads/$BRANCH 2>$GITLOG
|
git push origin HEAD:refs/heads/$BRANCH 2>$GITLOG
|
||||||
PRECOMPILED_HASH=`git rev-parse HEAD`
|
PRECOMPILED_HASH=`git rev-parse HEAD`
|
||||||
|
|
||||||
|
1 js/scripts/test.js Normal file
@ -0,0 +1 @@
|
|||||||
|
// test script 6
|
@ -27,9 +27,5 @@ export function sliceData (_data) {
|
|||||||
data = padAddress('');
|
data = padAddress('');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (data.length % 64) {
|
|
||||||
throw new Error(`Invalid data length (not mod 64) passed to sliceData, ${data}, % 64 == ${data.length % 64}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
return data.match(/.{1,64}/g);
|
return data.match(/.{1,64}/g);
|
||||||
}
|
}
|
||||||
|
@ -21,10 +21,6 @@ describe('abi/util/slice', () => {
|
|||||||
const slice1 = '131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b';
|
const slice1 = '131a3afc00d1b1e3461b955e53fc866dcf303b3eb9f4c16f89e388930f48134b';
|
||||||
const slice2 = '2124768576358735263578356373526387638357635873563586353756358763';
|
const slice2 = '2124768576358735263578356373526387638357635873563586353756358763';
|
||||||
|
|
||||||
it('throws an error on mod 64 != 0', () => {
|
|
||||||
expect(() => sliceData('123')).to.throw(/sliceData/);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('returns an empty array when length === 0', () => {
|
it('returns an empty array when length === 0', () => {
|
||||||
expect(sliceData('')).to.deep.equal([]);
|
expect(sliceData('')).to.deep.equal([]);
|
||||||
});
|
});
|
||||||
|
@ -189,15 +189,21 @@ export default class Contract {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
_encodeOptions (func, options, values) {
|
getCallData = (func, options, values) => {
|
||||||
|
let data = options.data;
|
||||||
|
|
||||||
const tokens = func ? this._abi.encodeTokens(func.inputParamTypes(), values) : null;
|
const tokens = func ? this._abi.encodeTokens(func.inputParamTypes(), values) : null;
|
||||||
const call = tokens ? func.encodeCall(tokens) : null;
|
const call = tokens ? func.encodeCall(tokens) : null;
|
||||||
|
|
||||||
if (options.data && options.data.substr(0, 2) === '0x') {
|
if (data && data.substr(0, 2) === '0x') {
|
||||||
options.data = options.data.substr(2);
|
data = data.substr(2);
|
||||||
}
|
}
|
||||||
options.data = `0x${options.data || ''}${call || ''}`;
|
|
||||||
|
|
||||||
|
return `0x${data || ''}${call || ''}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
_encodeOptions (func, options, values) {
|
||||||
|
options.data = this.getCallData(func, options, values);
|
||||||
return options;
|
return options;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -209,8 +215,10 @@ export default class Contract {
|
|||||||
|
|
||||||
_bindFunction = (func) => {
|
_bindFunction = (func) => {
|
||||||
func.call = (options, values = []) => {
|
func.call = (options, values = []) => {
|
||||||
|
const callParams = this._encodeOptions(func, this._addOptionsTo(options), values);
|
||||||
|
|
||||||
return this._api.eth
|
return this._api.eth
|
||||||
.call(this._encodeOptions(func, this._addOptionsTo(options), values))
|
.call(callParams)
|
||||||
.then((encoded) => func.decodeOutput(encoded))
|
.then((encoded) => func.decodeOutput(encoded))
|
||||||
.then((tokens) => tokens.map((token) => token.value))
|
.then((tokens) => tokens.map((token) => token.value))
|
||||||
.then((returns) => returns.length === 1 ? returns[0] : returns);
|
.then((returns) => returns.length === 1 ? returns[0] : returns);
|
||||||
@ -240,9 +248,28 @@ export default class Contract {
|
|||||||
return this.unsubscribe(subscriptionId);
|
return this.unsubscribe(subscriptionId);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
event.getAllLogs = (options = {}) => {
|
||||||
|
return this.getAllLogs(event);
|
||||||
|
};
|
||||||
|
|
||||||
return event;
|
return event;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
getAllLogs (event, _options) {
|
||||||
|
// Options as first parameter
|
||||||
|
if (!_options && event && event.topics) {
|
||||||
|
return this.getAllLogs(null, event);
|
||||||
|
}
|
||||||
|
|
||||||
|
const options = this._getFilterOptions(event, _options);
|
||||||
|
options.fromBlock = 0;
|
||||||
|
options.toBlock = 'latest';
|
||||||
|
|
||||||
|
return this._api.eth
|
||||||
|
.getLogs(options)
|
||||||
|
.then((logs) => this.parseEventLogs(logs));
|
||||||
|
}
|
||||||
|
|
||||||
_findEvent (eventName = null) {
|
_findEvent (eventName = null) {
|
||||||
const event = eventName
|
const event = eventName
|
||||||
? this._events.find((evt) => evt.name === eventName)
|
? this._events.find((evt) => evt.name === eventName)
|
||||||
@ -256,7 +283,7 @@ export default class Contract {
|
|||||||
return event;
|
return event;
|
||||||
}
|
}
|
||||||
|
|
||||||
_createEthFilter (event = null, _options) {
|
_getFilterOptions (event = null, _options = {}) {
|
||||||
const optionTopics = _options.topics || [];
|
const optionTopics = _options.topics || [];
|
||||||
const signature = event && event.signature || null;
|
const signature = event && event.signature || null;
|
||||||
|
|
||||||
@ -271,6 +298,11 @@ export default class Contract {
|
|||||||
topics
|
topics
|
||||||
});
|
});
|
||||||
|
|
||||||
|
return options;
|
||||||
|
}
|
||||||
|
|
||||||
|
_createEthFilter (event = null, _options) {
|
||||||
|
const options = this._getFilterOptions(event, _options);
|
||||||
return this._api.eth.newFilter(options);
|
return this._api.eth.newFilter(options);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -112,11 +112,15 @@ export function inNumber10 (number) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export function inNumber16 (number) {
|
export function inNumber16 (number) {
|
||||||
if (isInstanceOf(number, BigNumber)) {
|
const bn = isInstanceOf(number, BigNumber)
|
||||||
return inHex(number.toString(16));
|
? number
|
||||||
|
: (new BigNumber(number || 0));
|
||||||
|
|
||||||
|
if (!bn.isInteger()) {
|
||||||
|
throw new Error(`[format/input::inNumber16] the given number is not an integer: ${bn.toFormat()}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
return inHex((new BigNumber(number || 0)).toString(16));
|
return inHex(bn.toString(16));
|
||||||
}
|
}
|
||||||
|
|
||||||
export function inOptions (options) {
|
export function inOptions (options) {
|
||||||
@ -130,6 +134,9 @@ export function inOptions (options) {
|
|||||||
|
|
||||||
case 'gas':
|
case 'gas':
|
||||||
case 'gasPrice':
|
case 'gasPrice':
|
||||||
|
options[key] = inNumber16((new BigNumber(options[key])).round());
|
||||||
|
break;
|
||||||
|
|
||||||
case 'value':
|
case 'value':
|
||||||
case 'nonce':
|
case 'nonce':
|
||||||
options[key] = inNumber16(options[key]);
|
options[key] = inNumber16(options[key]);
|
||||||
|
@ -144,7 +144,8 @@ export function outSignerRequest (request) {
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case 'payload':
|
case 'payload':
|
||||||
request[key].transaction = outTransaction(request[key].transaction);
|
request[key].signTransaction = outTransaction(request[key].signTransaction);
|
||||||
|
request[key].sendTransaction = outTransaction(request[key].sendTransaction);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
@ -146,7 +146,8 @@ export default class Eth {
|
|||||||
|
|
||||||
getLogs (options) {
|
getLogs (options) {
|
||||||
return this._transport
|
return this._transport
|
||||||
.execute('eth_getLogs', inFilter(options));
|
.execute('eth_getLogs', inFilter(options))
|
||||||
|
.then((logs) => logs.map(outLog));
|
||||||
}
|
}
|
||||||
|
|
||||||
getLogsEx (options) {
|
getLogsEx (options) {
|
||||||
|
@ -128,6 +128,11 @@ export default class Parity {
|
|||||||
.execute('parity_killAccount', inAddress(account), password);
|
.execute('parity_killAccount', inAddress(account), password);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
removeAddress (address) {
|
||||||
|
return this._transport
|
||||||
|
.execute('parity_removeAddress', inAddress(address));
|
||||||
|
}
|
||||||
|
|
||||||
listGethAccounts () {
|
listGethAccounts () {
|
||||||
return this._transport
|
return this._transport
|
||||||
.execute('parity_listGethAccounts')
|
.execute('parity_listGethAccounts')
|
||||||
|
@ -68,6 +68,7 @@ export default class Personal {
|
|||||||
this._accountsInfo();
|
this._accountsInfo();
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
case 'parity_removeAddress':
|
||||||
case 'parity_setAccountName':
|
case 'parity_setAccountName':
|
||||||
case 'parity_setAccountMeta':
|
case 'parity_setAccountMeta':
|
||||||
this._accountsInfo();
|
this._accountsInfo();
|
||||||
|
@ -32,6 +32,10 @@ export function hex2Ascii (_hex) {
|
|||||||
return str;
|
return str;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function bytesToAscii (bytes) {
|
||||||
|
return bytes.map((b) => String.fromCharCode(b % 512)).join('');
|
||||||
|
}
|
||||||
|
|
||||||
export function asciiToHex (string) {
|
export function asciiToHex (string) {
|
||||||
return '0x' + string.split('').map((s) => s.charCodeAt(0).toString(16)).join('');
|
return '0x' + string.split('').map((s) => s.charCodeAt(0).toString(16)).join('');
|
||||||
}
|
}
|
||||||
|
1 js/src/contracts/abi/badgereg.json Normal file
@ -0,0 +1 @@
|
|||||||
|
[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_addr","type":"address"},{"name":"_name","type":"bytes32"}],"name":"register","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"fromName","outputs":[{"name":"id","type":"uint256"},{"name":"addr","type":"address"},{"name":"owner","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"badgeCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_fee","type":"uint256"}],"name":"setFee","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_id","type":"uint256"},{"name":"_key","type":"bytes32"}],"name":"meta","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_id","type":"uint256"}],"name":"unregister","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_addr","type":"address"}],"name":"fromAddress","outputs":[{"name":"id","type":"uint256"},{"name":"name","type":"bytes32"},{"name":"owner","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_id","type":"uint256"}],"name":"badge","outputs":[{"name":"addr","type":"address"},{"name":"name","type":"bytes32"},{"name":"owner","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_id","type":"uint256"},{"name":"_key","type":"bytes32"},{"name":"_value","type":"bytes32"}],"name":"setMeta","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_addr","type":"address"},{"name":"_name","type":"bytes32"},{"name":"_owner","type":"address"}],"name":"registerAs","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"id","type":"uint256"},{"indexed":false,"name":"addr","type":"address"}],"name":"Registered","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"id","type":"uint256"}],"name":"Unregistered","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"id","type":"uint256"},{"indexed":true,"name":"key","type":"bytes32"},{"indexed":false,"name":"value","type":"bytes32"}],"name":"MetaChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"}]
|
1 js/src/contracts/abi/certifier.json Normal file
@ -0,0 +1 @@
|
|||||||
|
[{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"who","type":"address"}],"name":"Confirmed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"who","type":"address"}],"name":"Revoked","type":"event"}]
|
@ -14,6 +14,7 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import badgereg from './badgereg.json';
|
||||||
import basiccoin from './basiccoin.json';
|
import basiccoin from './basiccoin.json';
|
||||||
import basiccoinmanager from './basiccoinmanager.json';
|
import basiccoinmanager from './basiccoinmanager.json';
|
||||||
import dappreg from './dappreg.json';
|
import dappreg from './dappreg.json';
|
||||||
@ -28,6 +29,7 @@ import tokenreg from './tokenreg.json';
|
|||||||
import wallet from './wallet.json';
|
import wallet from './wallet.json';
|
||||||
|
|
||||||
export {
|
export {
|
||||||
|
badgereg,
|
||||||
basiccoin,
|
basiccoin,
|
||||||
basiccoinmanager,
|
basiccoinmanager,
|
||||||
dappreg,
|
dappreg,
|
||||||
|
66 js/src/contracts/badgereg.js Normal file
@ -0,0 +1,66 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import { bytesToHex, hex2Ascii } from '~/api/util/format';
|
||||||
|
|
||||||
|
import ABI from './abi/certifier.json';
|
||||||
|
|
||||||
|
const ZERO = '0x0000000000000000000000000000000000000000000000000000000000000000';
|
||||||
|
|
||||||
|
export default class BadgeReg {
|
||||||
|
constructor (api, registry) {
|
||||||
|
this._api = api;
|
||||||
|
this._registry = registry;
|
||||||
|
|
||||||
|
registry.getContract('badgereg');
|
||||||
|
this.certifiers = {}; // by name
|
||||||
|
this.contracts = {}; // by name
|
||||||
|
}
|
||||||
|
|
||||||
|
fetchCertifier (name) {
|
||||||
|
if (this.certifiers[name]) {
|
||||||
|
return Promise.resolve(this.certifiers[name]);
|
||||||
|
}
|
||||||
|
return this._registry.getContract('badgereg')
|
||||||
|
.then((badgeReg) => {
|
||||||
|
return badgeReg.instance.fromName.call({}, [name])
|
||||||
|
.then(([ id, address ]) => {
|
||||||
|
return Promise.all([
|
||||||
|
badgeReg.instance.meta.call({}, [id, 'TITLE']),
|
||||||
|
badgeReg.instance.meta.call({}, [id, 'IMG'])
|
||||||
|
])
|
||||||
|
.then(([ title, img ]) => {
|
||||||
|
title = bytesToHex(title);
|
||||||
|
title = title === ZERO ? null : hex2Ascii(title);
|
||||||
|
if (bytesToHex(img) === ZERO) img = null;
|
||||||
|
|
||||||
|
const data = { address, name, title, icon: img };
|
||||||
|
this.certifiers[name] = data;
|
||||||
|
return data;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
checkIfCertified (certifier, address) {
|
||||||
|
if (!this.contracts[certifier]) {
|
||||||
|
this.contracts[certifier] = this._api.newContract(ABI, certifier);
|
||||||
|
}
|
||||||
|
const contract = this.contracts[certifier];
|
||||||
|
|
||||||
|
return contract.instance.certified.call({}, [address]);
|
||||||
|
}
|
||||||
|
}
|
@ -14,5 +14,8 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
export RequestFinished from './RequestFinished';
|
import wallet from './wallet';
|
||||||
export RequestPending from './RequestPending';
|
|
||||||
|
export {
|
||||||
|
wallet
|
||||||
|
};
|
23 js/src/contracts/code/wallet.js Normal file
File diff suppressed because one or more lines are too long
@ -20,6 +20,7 @@ import SignatureReg from './signaturereg';
|
|||||||
import TokenReg from './tokenreg';
|
import TokenReg from './tokenreg';
|
||||||
import GithubHint from './githubhint';
|
import GithubHint from './githubhint';
|
||||||
import * as smsVerification from './sms-verification';
|
import * as smsVerification from './sms-verification';
|
||||||
|
import BadgeReg from './badgereg';
|
||||||
|
|
||||||
let instance = null;
|
let instance = null;
|
||||||
|
|
||||||
@ -33,6 +34,7 @@ export default class Contracts {
|
|||||||
this._signaturereg = new SignatureReg(api, this._registry);
|
this._signaturereg = new SignatureReg(api, this._registry);
|
||||||
this._tokenreg = new TokenReg(api, this._registry);
|
this._tokenreg = new TokenReg(api, this._registry);
|
||||||
this._githubhint = new GithubHint(api, this._registry);
|
this._githubhint = new GithubHint(api, this._registry);
|
||||||
|
this.badgeReg = new BadgeReg(api, this._registry);
|
||||||
}
|
}
|
||||||
|
|
||||||
get registry () {
|
get registry () {
|
||||||
|
388 js/src/contracts/snippets/wallet.sol Normal file
@ -0,0 +1,388 @@
|
|||||||
|
//sol Wallet
|
||||||
|
// Multi-sig, daily-limited account proxy/wallet.
|
||||||
|
// @authors:
|
||||||
|
// Gav Wood <g@ethdev.com>
|
||||||
|
// inheritable "property" contract that enables methods to be protected by requiring the acquiescence of either a
|
||||||
|
// single, or, crucially, each of a number of, designated owners.
|
||||||
|
// usage:
|
||||||
|
// use modifiers onlyowner (just own owned) or onlymanyowners(hash), whereby the same hash must be provided by
|
||||||
|
// some number (specified in constructor) of the set of owners (specified in the constructor, modifiable) before the
|
||||||
|
// interior is executed.
|
||||||
|
pragma solidity ^0.4.6;
|
||||||
|
|
||||||
|
contract multiowned {
|
||||||
|
|
||||||
|
// TYPES
|
||||||
|
|
||||||
|
// struct for the status of a pending operation.
|
||||||
|
struct PendingState {
|
||||||
|
uint yetNeeded;
|
||||||
|
uint ownersDone;
|
||||||
|
uint index;
|
||||||
|
}
|
||||||
|
|
||||||
|
// EVENTS
|
||||||
|
|
||||||
|
// this contract only has six types of events: it can accept a confirmation, in which case
|
||||||
|
// we record owner and operation (hash) alongside it.
|
||||||
|
event Confirmation(address owner, bytes32 operation);
|
||||||
|
event Revoke(address owner, bytes32 operation);
|
||||||
|
// some others are in the case of an owner changing.
|
||||||
|
event OwnerChanged(address oldOwner, address newOwner);
|
||||||
|
event OwnerAdded(address newOwner);
|
||||||
|
event OwnerRemoved(address oldOwner);
|
||||||
|
// the last one is emitted if the required signatures change
|
||||||
|
event RequirementChanged(uint newRequirement);
|
||||||
|
|
||||||
|
// MODIFIERS
|
||||||
|
|
||||||
|
// simple single-sig function modifier.
|
||||||
|
modifier onlyowner {
|
||||||
|
if (isOwner(msg.sender))
|
||||||
|
_;
|
||||||
|
}
|
||||||
|
// multi-sig function modifier: the operation must have an intrinsic hash in order
|
||||||
|
// that later attempts can be realised as the same underlying operation and
|
||||||
|
// thus count as confirmations.
|
||||||
|
modifier onlymanyowners(bytes32 _operation) {
|
||||||
|
if (confirmAndCheck(_operation))
|
||||||
|
_;
|
||||||
|
}
|
||||||

    // METHODS

    // constructor is given number of sigs required to do protected "onlymanyowners" transactions
    // as well as the selection of addresses capable of confirming them.
    function multiowned(address[] _owners, uint _required) {
        m_numOwners = _owners.length + 1;
        m_owners[1] = uint(msg.sender);
        m_ownerIndex[uint(msg.sender)] = 1;
        for (uint i = 0; i < _owners.length; ++i)
        {
            m_owners[2 + i] = uint(_owners[i]);
            m_ownerIndex[uint(_owners[i])] = 2 + i;
        }
        m_required = _required;
    }

    // Revokes a prior confirmation of the given operation
    function revoke(bytes32 _operation) external {
        uint ownerIndex = m_ownerIndex[uint(msg.sender)];
        // make sure they're an owner
        if (ownerIndex == 0) return;
        uint ownerIndexBit = 2**ownerIndex;
        var pending = m_pending[_operation];
        if (pending.ownersDone & ownerIndexBit > 0) {
            pending.yetNeeded++;
            pending.ownersDone -= ownerIndexBit;
            Revoke(msg.sender, _operation);
        }
    }

    // Replaces an owner `_from` with another `_to`.
    function changeOwner(address _from, address _to) onlymanyowners(sha3(msg.data)) external {
        if (isOwner(_to)) return;
        uint ownerIndex = m_ownerIndex[uint(_from)];
        if (ownerIndex == 0) return;

        clearPending();
        m_owners[ownerIndex] = uint(_to);
        m_ownerIndex[uint(_from)] = 0;
        m_ownerIndex[uint(_to)] = ownerIndex;
        OwnerChanged(_from, _to);
    }

    function addOwner(address _owner) onlymanyowners(sha3(msg.data)) external {
        if (isOwner(_owner)) return;

        clearPending();
        if (m_numOwners >= c_maxOwners)
            reorganizeOwners();
        if (m_numOwners >= c_maxOwners)
            return;
        m_numOwners++;
        m_owners[m_numOwners] = uint(_owner);
        m_ownerIndex[uint(_owner)] = m_numOwners;
        OwnerAdded(_owner);
    }

    function removeOwner(address _owner) onlymanyowners(sha3(msg.data)) external {
        uint ownerIndex = m_ownerIndex[uint(_owner)];
        if (ownerIndex == 0) return;
        if (m_required > m_numOwners - 1) return;

        m_owners[ownerIndex] = 0;
        m_ownerIndex[uint(_owner)] = 0;
        clearPending();
        reorganizeOwners(); //make sure m_numOwner is equal to the number of owners and always points to the optimal free slot
        OwnerRemoved(_owner);
    }

    function changeRequirement(uint _newRequired) onlymanyowners(sha3(msg.data)) external {
        if (_newRequired > m_numOwners) return;
        m_required = _newRequired;
        clearPending();
        RequirementChanged(_newRequired);
    }

    // Gets an owner by 0-indexed position (using numOwners as the count)
    function getOwner(uint ownerIndex) external constant returns (address) {
        return address(m_owners[ownerIndex + 1]);
    }

    function isOwner(address _addr) returns (bool) {
        return m_ownerIndex[uint(_addr)] > 0;
    }

    function hasConfirmed(bytes32 _operation, address _owner) constant returns (bool) {
        var pending = m_pending[_operation];
        uint ownerIndex = m_ownerIndex[uint(_owner)];

        // make sure they're an owner
        if (ownerIndex == 0) return false;

        // determine the bit to set for this owner.
        uint ownerIndexBit = 2**ownerIndex;
        return !(pending.ownersDone & ownerIndexBit == 0);
    }

    // INTERNAL METHODS

    function confirmAndCheck(bytes32 _operation) internal returns (bool) {
        // determine what index the present sender is:
        uint ownerIndex = m_ownerIndex[uint(msg.sender)];
        // make sure they're an owner
        if (ownerIndex == 0) return;

        var pending = m_pending[_operation];
        // if we're not yet working on this operation, switch over and reset the confirmation status.
        if (pending.yetNeeded == 0) {
            // reset count of confirmations needed.
            pending.yetNeeded = m_required;
            // reset which owners have confirmed (none) - set our bitmap to 0.
            pending.ownersDone = 0;
            pending.index = m_pendingIndex.length++;
            m_pendingIndex[pending.index] = _operation;
        }
        // determine the bit to set for this owner.
        uint ownerIndexBit = 2**ownerIndex;
        // make sure we (the message sender) haven't confirmed this operation previously.
        if (pending.ownersDone & ownerIndexBit == 0) {
            Confirmation(msg.sender, _operation);
            // ok - check if count is enough to go ahead.
            if (pending.yetNeeded <= 1) {
                // enough confirmations: reset and run interior.
                delete m_pendingIndex[m_pending[_operation].index];
                delete m_pending[_operation];
                return true;
            }
            else
            {
                // not enough: record that this owner in particular confirmed.
                pending.yetNeeded--;
                pending.ownersDone |= ownerIndexBit;
            }
        }
    }
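    // worked example (editor's illustration): with m_required == 2, the owner stored at index 1
    // confirms first - ownersDone becomes 2**1 == 2 and yetNeeded drops from 2 to 1; when the owner
    // at index 3 (bit 2**3 == 8) then confirms the same _operation, yetNeeded <= 1 holds, the pending
    // record is deleted and confirmAndCheck returns true, so the protected body runs exactly once.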

    function reorganizeOwners() private {
        uint free = 1;
        while (free < m_numOwners)
        {
            while (free < m_numOwners && m_owners[free] != 0) free++;
            while (m_numOwners > 1 && m_owners[m_numOwners] == 0) m_numOwners--;
            if (free < m_numOwners && m_owners[m_numOwners] != 0 && m_owners[free] == 0)
            {
                m_owners[free] = m_owners[m_numOwners];
                m_ownerIndex[m_owners[free]] = free;
                m_owners[m_numOwners] = 0;
            }
        }
    }

    function clearPending() internal {
        uint length = m_pendingIndex.length;
        for (uint i = 0; i < length; ++i)
            if (m_pendingIndex[i] != 0)
                delete m_pending[m_pendingIndex[i]];
        delete m_pendingIndex;
    }

    // FIELDS

    // the number of owners that must confirm the same operation before it is run.
    uint public m_required;
    // pointer used to find a free slot in m_owners
    uint public m_numOwners;

    // list of owners
    uint[256] m_owners;
    uint constant c_maxOwners = 250;
    // index on the list of owners to allow reverse lookup
    mapping(uint => uint) m_ownerIndex;
    // the ongoing operations.
    mapping(bytes32 => PendingState) m_pending;
    bytes32[] m_pendingIndex;
}

// inheritable "property" contract that enables methods to be protected by placing a linear limit (specifiable)
// on a particular resource per calendar day. is multiowned to allow the limit to be altered. resource that method
// uses is specified in the modifier.
contract daylimit is multiowned {

    // MODIFIERS

    // simple modifier for daily limit.
    modifier limitedDaily(uint _value) {
        if (underLimit(_value))
            _;
    }

    // METHODS

    // constructor - stores initial daily limit and records the present day's index.
    function daylimit(uint _limit) {
        m_dailyLimit = _limit;
        m_lastDay = today();
    }
    // (re)sets the daily limit. needs many of the owners to confirm. doesn't alter the amount already spent today.
    function setDailyLimit(uint _newLimit) onlymanyowners(sha3(msg.data)) external {
        m_dailyLimit = _newLimit;
    }
    // resets the amount already spent today. needs many of the owners to confirm.
    function resetSpentToday() onlymanyowners(sha3(msg.data)) external {
        m_spentToday = 0;
    }

    // INTERNAL METHODS

    // checks to see if there is at least `_value` left from the daily limit today. if there is, subtracts it and
    // returns true. otherwise just returns false.
    function underLimit(uint _value) internal onlyowner returns (bool) {
        // reset the spend limit if we're on a different day to last time.
        if (today() > m_lastDay) {
            m_spentToday = 0;
            m_lastDay = today();
        }
        // check to see if there's enough left - if so, subtract and return true.
        // overflow protection // dailyLimit check
        if (m_spentToday + _value >= m_spentToday && m_spentToday + _value <= m_dailyLimit) {
            m_spentToday += _value;
            return true;
        }
        return false;
    }
    // determines today's index.
    function today() private constant returns (uint) { return now / 1 days; }

    // FIELDS

    uint public m_dailyLimit;
    uint public m_spentToday;
    uint public m_lastDay;
}
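// usage sketch (editor's illustration; `PettyCash` and `spend` are hypothetical names, not part of
// this file):
//
//     contract PettyCash is daylimit {
//         function PettyCash(address[] _owners, uint _required, uint _limit)
//             multiowned(_owners, _required) daylimit(_limit) {}
//         function () payable {}
//         function spend(address _to, uint _value) limitedDaily(_value) external {
//             if (!_to.send(_value)) throw;
//         }
//     }
//
// limitedDaily(_value) hands _value to underLimit, which lets the body run only while the total
// spent since today() last changed stays within m_dailyLimit (and only for owners, since underLimit
// is itself guarded by onlyowner).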

// interface contract for multisig proxy contracts; see below for docs.
contract multisig {

    // EVENTS

    // logged events:
    // Funds has arrived into the wallet (record how much).
    event Deposit(address _from, uint value);
    // Single transaction going out of the wallet (record who signed for it, how much, and to whom it's going).
    event SingleTransact(address owner, uint value, address to, bytes data);
    // Multi-sig transaction going out of the wallet (record who signed for it last, the operation hash, how much, and to whom it's going).
    event MultiTransact(address owner, bytes32 operation, uint value, address to, bytes data);
    // Confirmation still needed for a transaction.
    event ConfirmationNeeded(bytes32 operation, address initiator, uint value, address to, bytes data);

    // FUNCTIONS

    // TODO: document
    function changeOwner(address _from, address _to) external;
    function execute(address _to, uint _value, bytes _data) external returns (bytes32);
    function confirm(bytes32 _h) returns (bool);
}

// usage:
// bytes32 h = Wallet(w).from(oneOwner).execute(to, value, data);
// Wallet(w).from(anotherOwner).confirm(h);
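// (editor's note: the `.from(owner)` calls above are shorthand for "sent from that owner's account",
// not Solidity syntax - in practice each owner sends an ordinary transaction to execute(...) and
// then to confirm(h) from their own address.)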
contract Wallet is multisig, multiowned, daylimit {

    // TYPES

    // Transaction structure to remember details of transaction lest it need be saved for a later call.
    struct Transaction {
        address to;
        uint value;
        bytes data;
    }

    // METHODS

    // constructor - just pass on the owner array to the multiowned and
    // the limit to daylimit
    function Wallet(address[] _owners, uint _required, uint _daylimit)
        multiowned(_owners, _required) daylimit(_daylimit) {
    }

    // kills the contract sending everything to `_to`.
    function kill(address _to) onlymanyowners(sha3(msg.data)) external {
        suicide(_to);
    }

    // gets called when no other function matches
    function() payable {
        // just being sent some cash?
        if (msg.value > 0)
            Deposit(msg.sender, msg.value);
    }

    // Outside-visible transact entry point. Executes transaction immediately if below daily spend limit.
    // If not, goes into multisig process. We provide a hash on return to allow the sender to provide
    // shortcuts for the other confirmations (allowing them to avoid replicating the _to, _value
    // and _data arguments). They still get the option of using them if they want, anyways.
    function execute(address _to, uint _value, bytes _data) external onlyowner returns (bytes32 _r) {
        // first, take the opportunity to check that we're under the daily limit.
        if (underLimit(_value)) {
            SingleTransact(msg.sender, _value, _to, _data);
            // yes - just execute the call.
            _to.call.value(_value)(_data);
            return 0;
        }
        // determine our operation hash.
        _r = sha3(msg.data, block.number);
        if (!confirm(_r) && m_txs[_r].to == 0) {
            m_txs[_r].to = _to;
            m_txs[_r].value = _value;
            m_txs[_r].data = _data;
            ConfirmationNeeded(_r, msg.sender, _value, _to, _data);
        }
    }

    // confirm a transaction through just the hash. we use the previous transactions map, m_txs, in order
    // to determine the body of the transaction from the hash provided.
    function confirm(bytes32 _h) onlymanyowners(_h) returns (bool) {
        if (m_txs[_h].to != 0) {
            m_txs[_h].to.call.value(m_txs[_h].value)(m_txs[_h].data);
            MultiTransact(msg.sender, _h, m_txs[_h].value, m_txs[_h].to, m_txs[_h].data);
            delete m_txs[_h];
            return true;
        }
    }

    // INTERNAL METHODS

    function clearPending() internal {
        uint length = m_pendingIndex.length;
        for (uint i = 0; i < length; ++i)
            delete m_txs[m_pendingIndex[i]];
        super.clearPending();
    }

    // FIELDS

    // pending transactions we have at present.
    mapping (bytes32 => Transaction) m_txs;
}
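The Wallet constructor above simply forwards its arguments to multiowned and daylimit, so the signing policy and daily limit are fixed at deployment. As a rough, hypothetical sketch (WalletFactory and create are illustrative names only, assuming the wallet.sol contracts above are in scope), a 2-of-3 wallet with a 1 ether daily limit could be set up like this:

    contract WalletFactory {
        event Created(address wallet);

        // _coOwners supplies two further owners; the deployer of the Wallet (here, the factory
        // itself) becomes the third, since multiowned stores msg.sender in slot 1 of m_owners.
        function create(address[] _coOwners) returns (address) {
            address w = new Wallet(_coOwners, 2, 1 ether);
            Created(w);
            return w;
        }
    }

Spends at or below the daily limit then take execute's immediate path; anything larger is parked in m_txs and waits for a second owner to call confirm with the returned operation hash.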
@ -16,8 +16,7 @@

 import ReactDOM from 'react-dom';
 import React from 'react';
-import { createHashHistory } from 'history';
-import { Redirect, Router, Route, useRouterHistory } from 'react-router';
+import { Redirect, Router, Route, hashHistory } from 'react-router';

 import injectTapEventPlugin from 'react-tap-event-plugin';
 injectTapEventPlugin();
@ -27,14 +26,12 @@ import Application from './basiccoin/Application';
 import Overview from './basiccoin/Overview';
 import Transfer from './basiccoin/Transfer';

-const routerHistory = useRouterHistory(createHashHistory)({});
-
 import '../../assets/fonts/Roboto/font.css';
 import '../../assets/fonts/RobotoMono/font.css';
 import './style.css';

 ReactDOM.render(
-<Router history={ routerHistory }>
+<Router history={ hashHistory }>
 <Redirect from='/' to='/overview' />
 <Route path='/' component={ Application }>
 <Route path='deploy' component={ Deploy } />
@ -94,7 +94,6 @@ export default class Application extends Component {
 tokenregInstance,
 accounts: Object
 .keys(accountsInfo)
-.filter((address) => !accountsInfo[address].meta.deleted)
 .sort((a, b) => {
 return (accountsInfo[b].uuid || '').localeCompare(accountsInfo[a].uuid || '');
 })
@ -17,7 +17,7 @@
 import BigNumber from 'bignumber.js';
 import React, { Component, PropTypes } from 'react';

-import { eip20 } from '../../../../contracts/abi';
+import { eip20 } from '~/contracts/abi';

 import { api } from '../../parity';
 import { loadBalances } from '../../services';
@ -16,7 +16,7 @@

 import BigNumber from 'bignumber.js';

-import * as abis from '../../contracts/abi';
+import * as abis from '~/contracts/abi';
 import { api } from './parity';

 let managerInstance;
@ -17,8 +17,8 @@
 import BigNumber from 'bignumber.js';
 import { action, computed, observable, transaction } from 'mobx';

-import * as abis from '../../contracts/abi';
-import builtins from '../../views/Dapps/builtin.json';
+import * as abis from '~/contracts/abi';
+import builtins from '~/views/Dapps/builtin.json';

 import { api } from './parity';

@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-import * as abis from '../../contracts/abi';
+import * as abis from '~/contracts/abi';
 import { api } from './parity';

 export function attachInterface () {
@ -8,7 +8,7 @@
 <style>
 html, body, #container {
 width: 100%;
-height: 100%;
+min-height: 100%;
 margin: 0;
 padding: 0;
 background: white;
@ -21,6 +21,9 @@ const muiTheme = getMuiTheme(lightBaseTheme);

 import CircularProgress from 'material-ui/CircularProgress';
 import { Card, CardText } from 'material-ui/Card';

+import { nullableProptype } from '~/util/proptypes';

 import styles from './application.css';
 import Accounts from '../Accounts';
 import Events from '../Events';
@ -28,8 +31,6 @@ import Lookup from '../Lookup';
 import Names from '../Names';
 import Records from '../Records';

-const nullable = (type) => React.PropTypes.oneOfType([ React.PropTypes.oneOf([ null ]), type ]);
-
 export default class Application extends Component {
 static childContextTypes = {
 muiTheme: PropTypes.object.isRequired,
@ -44,8 +45,8 @@ export default class Application extends Component {
 actions: PropTypes.object.isRequired,
 accounts: PropTypes.object.isRequired,
 contacts: PropTypes.object.isRequired,
-contract: nullable(PropTypes.object.isRequired),
-fee: nullable(PropTypes.object.isRequired),
+contract: nullableProptype(PropTypes.object.isRequired),
+fee: nullableProptype(PropTypes.object.isRequired),
 lookup: PropTypes.object.isRequired,
 events: PropTypes.object.isRequired,
 names: PropTypes.object.isRequired,
@ -18,19 +18,19 @@ import React, { Component, PropTypes } from 'react';
 import { connect } from 'react-redux';
 import { bindActionCreators } from 'redux';

+import { nullableProptype } from '~/util/proptypes';

 import Application from './Application';
 import * as actions from './actions';

-const nullable = (type) => React.PropTypes.oneOfType([ React.PropTypes.oneOf([ null ]), type ]);

 class Container extends Component {
 static propTypes = {
 actions: PropTypes.object.isRequired,
 accounts: PropTypes.object.isRequired,
 contacts: PropTypes.object.isRequired,
-contract: nullable(PropTypes.object.isRequired),
-owner: nullable(PropTypes.string.isRequired),
-fee: nullable(PropTypes.object.isRequired),
+contract: nullableProptype(PropTypes.object.isRequired),
+owner: nullableProptype(PropTypes.string.isRequired),
+fee: nullableProptype(PropTypes.object.isRequired),
 lookup: PropTypes.object.isRequired,
 events: PropTypes.object.isRequired
 };
@ -19,21 +19,22 @@ import { Card, CardHeader, CardText } from 'material-ui/Card';
 import TextField from 'material-ui/TextField';
 import RaisedButton from 'material-ui/RaisedButton';
 import SearchIcon from 'material-ui/svg-icons/action/search';

+import { nullableProptype } from '~/util/proptypes';

 import renderAddress from '../ui/address.js';
 import renderImage from '../ui/image.js';

 import recordTypeSelect from '../ui/record-type-select.js';
 import styles from './lookup.css';

-const nullable = (type) => React.PropTypes.oneOfType([ React.PropTypes.oneOf([ null ]), type ]);

 export default class Lookup extends Component {

 static propTypes = {
 actions: PropTypes.object.isRequired,
 name: PropTypes.string.isRequired,
 type: PropTypes.string.isRequired,
-result: nullable(PropTypes.string.isRequired),
+result: nullableProptype(PropTypes.string.isRequired),
 accounts: PropTypes.object.isRequired,
 contacts: PropTypes.object.isRequired
 }
@ -1,3 +1,19 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
 import React, { Component, PropTypes } from 'react';
 import { Card, CardHeader, CardText } from 'material-ui/Card';
 import TextField from 'material-ui/TextField';
@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-import { registry as registryAbi } from '../../contracts/abi';
+import { registry as registryAbi } from '~/contracts/abi';

 import { api } from './parity.js';
 import * as addresses from './addresses/actions.js';
|
|||||||
.then((accountsInfo) => {
|
.then((accountsInfo) => {
|
||||||
const addresses = Object
|
const addresses = Object
|
||||||
.keys(accountsInfo)
|
.keys(accountsInfo)
|
||||||
.filter((address) => accountsInfo[address] && !accountsInfo[address].meta.deleted)
|
|
||||||
.map((address) => ({
|
.map((address) => ({
|
||||||
...accountsInfo[address],
|
...accountsInfo[address],
|
||||||
address,
|
address,
|
||||||
|
@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-import * as abis from '../../contracts/abi';
+import * as abis from '~/contracts/abi';
 import { api } from './parity';

 const sortEvents = (a, b) => b.blockNumber.cmp(a.blockNumber) || b.logIndex.cmp(a.logIndex);
@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-import Contracts from '../../../contracts';
+import Contracts from '~/contracts';

 import { loadToken, setTokenPending, deleteToken, setTokenData } from '../Tokens/actions';

@ -16,7 +16,7 @@

 import { api } from './parity';

-import { eip20 as eip20Abi } from '../../contracts/abi';
+import { eip20 as eip20Abi } from '~/contracts/abi';

 export const getTokenTotalSupply = (tokenAddress) => {
 return api
@ -25,19 +25,18 @@ import ReactDOM from 'react-dom';
 import { AppContainer } from 'react-hot-loader';

 import injectTapEventPlugin from 'react-tap-event-plugin';
-import { createHashHistory } from 'history';
-import { useRouterHistory } from 'react-router';
+import { hashHistory } from 'react-router';
 import qs from 'querystring';

 import SecureApi from './secureApi';
-import ContractInstances from './contracts';
+import ContractInstances from '~/contracts';

 import { initStore } from './redux';
-import ContextProvider from './ui/ContextProvider';
-import muiTheme from './ui/Theme';
+import ContextProvider from '~/ui/ContextProvider';
+import muiTheme from '~/ui/Theme';
 import MainApplication from './main';

-import { setApi } from './redux/providers/apiActions';
+import { setApi } from '~/redux/providers/apiActions';

 import './environment';

@ -74,13 +73,11 @@ store.dispatch(setApi(api));

 window.secureApi = api;

-const routerHistory = useRouterHistory(createHashHistory)({});
-
 ReactDOM.render(
 <AppContainer>
 <ContextProvider api={ api } muiTheme={ muiTheme } store={ store }>
 <MainApplication
-routerHistory={ routerHistory }
+routerHistory={ hashHistory }
 />
 </ContextProvider>
 </AppContainer>,
@ -88,24 +85,6 @@ ReactDOM.render(
 );

 if (module.hot) {
-// module.hot.accept('./redux', () => {
-// // redux store has a method replaceReducer
-// // const newStore = initStore(api);
-// console.warn('REDUX UPDATE');
-// // store.replaceReducer(appReducer);
-
-// // ReactDOM.render(
-// // <AppContainer>
-// // <ContextProvider api={ api } muiTheme={ muiTheme } store={ newStore }>
-// // <MainApplication
-// // routerHistory={ routerHistory }
-// // />
-// // </ContextProvider>
-// // </AppContainer>,
-// // document.querySelector('#container')
-// // );
-// });
-
 module.hot.accept('./main.js', () => {
 require('./main.js');

@ -113,7 +92,7 @@ if (module.hot) {
 <AppContainer>
 <ContextProvider api={ api } muiTheme={ muiTheme } store={ store }>
 <MainApplication
-routerHistory={ routerHistory }
+routerHistory={ hashHistory }
 />
 </ContextProvider>
 </AppContainer>,
@ -256,6 +256,20 @@ export default {
 }
 },

+removeAddress: {
+  desc: 'Removes an address from the addressbook',
+  params: [
+    {
+      type: Address,
+      desc: 'The address to remove'
+    }
+  ],
+  returns: {
+    type: Boolean,
+    desc: 'true on success'
+  }
+},
+
 listGethAccounts: {
 desc: 'Returns a list of the accounts available from Geth',
 params: [],
Some files were not shown because too many files have changed in this diff.