Merge branch 'master' into optimizing-build

Conflicts:
    .travis.yml

Commit: cc19f11c4d
.travis.yml (20 lines changed)

@@ -21,6 +21,7 @@ matrix:

 env:
   global:
+    - TRAVIS_NODE_VERSION="6"
     - CXX="g++-4.8"
     - CC="gcc-4.8"
     - RUST_BACKTRACE="1"
@@ -48,15 +49,16 @@ addons:
     - gcc-4.8
     - g++-4.8

-install: |
-  [ "$RUN_COVERAGE" = "false" ] ||
+install:
+  - rm -rf ~/.nvm && git clone https://github.com/creationix/nvm.git ~/.nvm && (cd ~/.nvm && git checkout `git describe --abbrev=0 --tags`) && source ~/.nvm/nvm.sh && nvm install $TRAVIS_NODE_VERSION
+  - (true && [ "$RUN_COVERAGE" = "false" ]) ||
   (wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz &&
   tar xzf master.tar.gz &&
   mkdir -p kcov-master/build &&
   cd kcov-master/build &&
   cmake .. &&
   make && make install DESTDIR=../tmp &&
   cd)

 script:
   - if [ "$RUN_TESTS" = "true" ]; then ./test.sh; fi
Cargo.lock (generated, 59 lines changed)

@@ -3,7 +3,7 @@ name = "parity"
 version = "1.3.0"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
  "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -129,15 +129,15 @@ dependencies = [

 [[package]]
 name = "clippy"
-version = "0.0.78"
+version = "0.0.79"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "clippy_lints 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy_lints 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "clippy_lints"
-version = "0.0.78"
+version = "0.0.79"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -239,7 +239,7 @@ name = "ethcore"
 version = "1.3.0"
 dependencies = [
  "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethash 1.3.0",
@@ -267,7 +267,7 @@ dependencies = [
 name = "ethcore-dapps"
 version = "1.3.0"
 dependencies = [
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-rpc 1.3.0",
  "ethcore-util 1.3.0",
  "hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
@@ -275,10 +275,10 @@ dependencies = [
  "jsonrpc-http-server 5.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
- "parity-dapps-builtins 0.5.2 (git+https://github.com/ethcore/parity-dapps-builtins-rs.git)",
- "parity-dapps-status 0.5.1 (git+https://github.com/ethcore/parity-dapps-status-rs.git)",
- "parity-dapps-wallet 0.6.1 (git+https://github.com/ethcore/parity-dapps-wallet-rs.git)",
+ "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-ui.git)",
+ "parity-dapps-home 0.5.2 (git+https://github.com/ethcore/parity-ui.git)",
+ "parity-dapps-status 0.5.1 (git+https://github.com/ethcore/parity-ui.git)",
+ "parity-dapps-wallet 0.6.1 (git+https://github.com/ethcore/parity-ui.git)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -343,7 +343,7 @@ dependencies = [
 name = "ethcore-rpc"
 version = "1.3.0"
 dependencies = [
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethash 1.3.0",
  "ethcore 1.3.0",
  "ethcore-devtools 1.3.0",
@@ -367,13 +367,13 @@ dependencies = [
 name = "ethcore-signer"
 version = "1.3.0"
 dependencies = [
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-rpc 1.3.0",
  "ethcore-util 1.3.0",
  "jsonrpc-core 2.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-minimal-sysui 0.2.0 (git+https://github.com/ethcore/parity-dapps-minimal-sysui-rs.git)",
+ "parity-dapps-signer 0.2.0 (git+https://github.com/ethcore/parity-ui.git)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "ws 0.5.0 (git+https://github.com/ethcore/ws-rs.git?branch=stable)",
@@ -387,7 +387,7 @@ dependencies = [
  "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "bigint 0.1.0",
  "chrono 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -459,7 +459,7 @@ dependencies = [
 name = "ethsync"
 version = "1.3.0"
 dependencies = [
- "clippy 0.0.78 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clippy 0.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore 1.3.0",
  "ethcore-util 1.3.0",
@@ -862,7 +862,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "parity-dapps"
 version = "0.3.0"
-source = "git+https://github.com/ethcore/parity-dapps-rs.git#8ce18c014d8b69fa31fb203b68ff240091d77a23"
+source = "git+https://github.com/ethcore/parity-ui.git#4c5a972e81379cbf96fe6119a127d3540de14b1b"
 dependencies = [
  "aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -874,34 +874,37 @@ dependencies = [
 ]

 [[package]]
-name = "parity-dapps-builtins"
+name = "parity-dapps-home"
 version = "0.5.2"
-source = "git+https://github.com/ethcore/parity-dapps-builtins-rs.git#01af2091d5d70dfe0aecbfd96308f0ae79fc61e6"
+source = "git+https://github.com/ethcore/parity-ui.git#4c5a972e81379cbf96fe6119a127d3540de14b1b"
 dependencies = [
- "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
+ "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-ui.git)",
+]
+
+[[package]]
+name = "parity-dapps-signer"
+version = "0.2.0"
+source = "git+https://github.com/ethcore/parity-ui.git#4c5a972e81379cbf96fe6119a127d3540de14b1b"
+dependencies = [
+ "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]

 [[package]]
 name = "parity-dapps-status"
 version = "0.5.1"
-source = "git+https://github.com/ethcore/parity-dapps-status-rs.git#110ef2e66142ec8dc15fc40b8ddda5ed3bcfc1fb"
+source = "git+https://github.com/ethcore/parity-ui.git#4c5a972e81379cbf96fe6119a127d3540de14b1b"
 dependencies = [
- "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
+ "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]

 [[package]]
 name = "parity-dapps-wallet"
 version = "0.6.1"
-source = "git+https://github.com/ethcore/parity-dapps-wallet-rs.git#867994fe25038f000f1cc09cd024a83700a03930"
+source = "git+https://github.com/ethcore/parity-ui.git#4c5a972e81379cbf96fe6119a127d3540de14b1b"
 dependencies = [
- "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-dapps-rs.git)",
+ "parity-dapps 0.3.0 (git+https://github.com/ethcore/parity-ui.git)",
 ]

-[[package]]
-name = "parity-minimal-sysui"
-version = "0.2.0"
-source = "git+https://github.com/ethcore/parity-dapps-minimal-sysui-rs.git#996c9f3f0ebedc727aecb4ece191154e956ae292"
-
 [[package]]
 name = "phf"
 version = "0.7.14"
@@ -22,13 +22,13 @@ fdlimit = { path = "util/fdlimit" }
 num_cpus = "0.2"
 number_prefix = "0.2"
 rpassword = "0.2.1"
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
 ethcore = { path = "ethcore" }
 ethcore-util = { path = "util" }
 ethsync = { path = "sync" }
 ethcore-devtools = { path = "devtools" }
-ethcore-rpc = { path = "rpc", optional = true }
-ethcore-signer = { path = "signer", optional = true }
+ethcore-rpc = { path = "rpc" }
+ethcore-signer = { path = "signer" }
 ethcore-dapps = { path = "dapps", optional = true }
 semver = "0.2"
 ethcore-ipc-nano = { path = "ipc/nano" }
@@ -48,8 +48,7 @@ version = "0.8"
 default-features = false

 [features]
-default = ["rpc", "dapps", "ethcore-signer"]
-rpc = ["ethcore-rpc"]
+default = ["dapps"]
 dapps = ["ethcore-dapps"]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev",
 "ethcore-dapps/dev", "ethcore-signer/dev"]
@@ -17,6 +17,7 @@ branches:

 install:
   - git submodule update --init --recursive
+  - ps: Install-Product node 6
   - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.9.0-x86_64-pc-windows-msvc.exe"
   - ps: Start-FileDownload "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -FileName nsis\SimpleFC.dll
   - ps: Start-FileDownload "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -FileName nsis\vc_redist.x64.exe
@@ -24,6 +25,8 @@ install:
   - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin;C:\Program Files (x86)\NSIS;C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Bin
   - rustc -V
   - cargo -V
+  - node -v
+  - npm -v

 build: off

@@ -21,15 +21,13 @@ serde_json = "0.7.0"
 serde_macros = { version = "0.7.0", optional = true }
 ethcore-rpc = { path = "../rpc" }
 ethcore-util = { path = "../util" }
-parity-dapps = { git = "https://github.com/ethcore/parity-dapps-rs.git", version = "0.3" }
+parity-dapps = { git = "https://github.com/ethcore/parity-ui.git", version = "0.3" }
 # List of apps
-parity-dapps-status = { git = "https://github.com/ethcore/parity-dapps-status-rs.git", version = "0.5.1" }
-parity-dapps-builtins = { git = "https://github.com/ethcore/parity-dapps-builtins-rs.git", version = "0.5.2" }
-parity-dapps-wallet = { git = "https://github.com/ethcore/parity-dapps-wallet-rs.git", version = "0.6.0", optional = true }
-parity-dapps-dao = { git = "https://github.com/ethcore/parity-dapps-dao-rs.git", version = "0.4.0", optional = true }
-parity-dapps-makerotc = { git = "https://github.com/ethcore/parity-dapps-makerotc-rs.git", version = "0.3.0", optional = true }
+parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", version = "0.5.1" }
+parity-dapps-home = { git = "https://github.com/ethcore/parity-ui.git", version = "0.5.2" }
+parity-dapps-wallet = { git = "https://github.com/ethcore/parity-ui.git", version = "0.6.0", optional = true }
 mime_guess = { version = "1.6.1" }
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}

 [build-dependencies]
 serde_codegen = { version = "0.7.0", optional = true }
@@ -22,7 +22,7 @@ use parity_dapps::WebApp;
 mod fs;

 extern crate parity_dapps_status;
-extern crate parity_dapps_builtins;
+extern crate parity_dapps_home;

 pub const DAPPS_DOMAIN : &'static str = ".parity";
 pub const RPC_PATH : &'static str = "rpc";
@@ -34,7 +34,7 @@ pub fn main_page() -> &'static str {
 }

 pub fn utils() -> Box<Endpoint> {
-    Box::new(PageEndpoint::with_prefix(parity_dapps_builtins::App::default(), UTILS_PATH.to_owned()))
+    Box::new(PageEndpoint::with_prefix(parity_dapps_home::App::default(), UTILS_PATH.to_owned()))
 }

 pub fn all_endpoints(dapps_path: String) -> Endpoints {
@@ -44,7 +44,7 @@ pub fn all_endpoints(dapps_path: String) -> Endpoints {
     // because we use Cross-Origin LocalStorage.
     // TODO [ToDr] Account naming should be moved to parity.
     pages.insert("home".into(), Box::new(
-        PageEndpoint::new_safe_to_embed(parity_dapps_builtins::App::default())
+        PageEndpoint::new_safe_to_embed(parity_dapps_home::App::default())
     ));
     pages.insert("proxy".into(), ProxyPac::boxed());
     insert::<parity_dapps_status::App>(&mut pages, "parity");
@@ -52,8 +52,6 @@ pub fn all_endpoints(dapps_path: String) -> Endpoints {

     // Optional dapps
     wallet_page(&mut pages);
-    daodapp_page(&mut pages);
-    makerotc_page(&mut pages);

     pages
 }
@@ -66,22 +64,6 @@ fn wallet_page(pages: &mut Endpoints) {
 #[cfg(not(feature = "parity-dapps-wallet"))]
 fn wallet_page(_pages: &mut Endpoints) {}

-#[cfg(feature = "parity-dapps-dao")]
-fn daodapp_page(pages: &mut Endpoints) {
-    extern crate parity_dapps_dao;
-    insert::<parity_dapps_dao::App>(pages, "dao");
-}
-#[cfg(not(feature = "parity-dapps-dao"))]
-fn daodapp_page(_pages: &mut Endpoints) {}
-
-#[cfg(feature = "parity-dapps-makerotc")]
-fn makerotc_page(pages: &mut Endpoints) {
-    extern crate parity_dapps_makerotc;
-    insert::<parity_dapps_makerotc::App>(pages, "makerotc");
-}
-#[cfg(not(feature = "parity-dapps-makerotc"))]
-fn makerotc_page(_pages: &mut Endpoints) {}
-
 fn insert<T : WebApp + Default + 'static>(pages: &mut Endpoints, id: &str) {
     pages.insert(id.to_owned(), Box::new(PageEndpoint::new(T::default())));
 }
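Note: the removed daodapp_page / makerotc_page helpers follow the same pattern that the remaining wallet_page still uses: a feature-gated implementation plus a no-op fallback, so the call site in all_endpoints never needs its own cfg. A minimal, self-contained sketch of that pattern (the "wallet" feature name and the map contents are illustrative, not the real dapps types):

    use std::collections::HashMap;

    // Stand-in for the dapps `Endpoints` map used above.
    type Endpoints = HashMap<String, &'static str>;

    // Compiled only when the optional dapp is enabled.
    #[cfg(feature = "wallet")]
    fn wallet_page(pages: &mut Endpoints) {
        pages.insert("wallet".to_owned(), "wallet endpoint");
    }

    // No-op fallback keeps the call site unconditional.
    #[cfg(not(feature = "wallet"))]
    fn wallet_page(_pages: &mut Endpoints) {}

    fn main() {
        let mut pages = Endpoints::new();
        wallet_page(&mut pages); // present only if built with the "wallet" feature
        println!("{} page(s) registered", pages.len());
    }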
@@ -12,7 +12,7 @@ syntex = "*"
 ethcore-ipc-codegen = { path = "../ipc/codegen" }

 [dependencies]
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
 ethcore-devtools = { path = "../devtools" }
 ethcore-ipc = { path = "../ipc/rpc" }
 rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" }
@@ -60,7 +60,7 @@ impl WriteCache {
         self.entries.insert(key, WriteCacheEntry::Remove);
     }

-    fn get(&self, key: &Vec<u8>) -> Option<Vec<u8>> {
+    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
         self.entries.get(key).and_then(
             |vec_ref| match vec_ref {
                 &WriteCacheEntry::Write(ref val) => Some(val.clone()),
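Note: taking &[u8] instead of &Vec<u8> still works directly with a map keyed by Vec<u8>, because HashMap::get accepts any borrowed form of the key via the Borrow trait, and Vec<u8>: Borrow<[u8]> holds. A minimal standalone sketch of the same lookup (not the WriteCache type itself):

    use std::collections::HashMap;

    // Look a value up by a byte slice even though the map owns Vec<u8> keys.
    fn get_cached(cache: &HashMap<Vec<u8>, Vec<u8>>, key: &[u8]) -> Option<Vec<u8>> {
        cache.get(key).cloned()
    }

    fn main() {
        let mut cache = HashMap::new();
        cache.insert(b"block-1".to_vec(), b"payload".to_vec());
        // Callers can now pass a slice, an array reference, or a Vec without cloning the key.
        assert_eq!(get_cached(&cache, b"block-1"), Some(b"payload".to_vec()));
        assert_eq!(get_cached(&cache, b"missing"), None);
    }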
@@ -19,6 +19,7 @@
 use std::path::*;
 use std::fs;
 use std::env;
+use std::ops::{Deref, DerefMut};
 use rand::random;

 pub struct RandomTempPath {
@@ -93,6 +94,16 @@ impl<T> GuardedTempResult<T> {
     }
 }

+impl<T> Deref for GuardedTempResult<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T { self.result.as_ref().unwrap() }
+}
+
+impl<T> DerefMut for GuardedTempResult<T> {
+    fn deref_mut(&mut self) -> &mut T { self.result.as_mut().unwrap() }
+}
+
 #[test]
 fn creates_dir() {
     let temp = RandomTempPath::create_dir();
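Note: with Deref/DerefMut in place, a GuardedTempResult<T> can be used wherever a &T or &mut T is expected, so callers no longer reach into the inner Option. A self-contained sketch of the same wrapper pattern (the Guarded type here is illustrative; the field layout mirrors the diff above):

    use std::ops::{Deref, DerefMut};

    // A guard that owns some resource and exposes the computed result through Deref,
    // mirroring GuardedTempResult<T>: the result is stored as Option<T>.
    struct Guarded<T> {
        result: Option<T>,
    }

    impl<T> Deref for Guarded<T> {
        type Target = T;
        fn deref(&self) -> &T { self.result.as_ref().unwrap() }
    }

    impl<T> DerefMut for Guarded<T> {
        fn deref_mut(&mut self) -> &mut T { self.result.as_mut().unwrap() }
    }

    fn main() {
        let mut guarded = Guarded { result: Some(vec![1u32, 2, 3]) };
        guarded.push(4);               // DerefMut: calls Vec::push through the wrapper
        assert_eq!(guarded.len(), 4);  // Deref: Vec::len through the wrapper
    }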
@@ -22,7 +22,7 @@ ethcore-util = { path = "../util" }
 evmjit = { path = "../evmjit", optional = true }
 ethash = { path = "../ethash" }
 num_cpus = "0.2"
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
 crossbeam = "0.2.9"
 lazy_static = "0.2"
 ethcore-devtools = { path = "../devtools" }
@@ -16,8 +16,6 @@

 //! Blockchain block.

-#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
-
 use common::*;
 use engine::*;
 use state::*;
@@ -26,7 +24,7 @@ use trace::Trace;
 use evm::Factory as EvmFactory;

 /// A block, encoded as it is on the block chain.
-#[derive(Default, Debug, Clone)]
+#[derive(Default, Debug, Clone, PartialEq)]
 pub struct Block {
     /// The header of this block.
     pub header: Header,
@@ -76,11 +74,11 @@ pub struct BlockRefMut<'a> {
     /// Block header.
     pub header: &'a mut Header,
     /// Block transactions.
-    pub transactions: &'a Vec<SignedTransaction>,
+    pub transactions: &'a [SignedTransaction],
     /// Block uncles.
-    pub uncles: &'a Vec<Header>,
+    pub uncles: &'a [Header],
     /// Transaction receipts.
-    pub receipts: &'a Vec<Receipt>,
+    pub receipts: &'a [Receipt],
     /// State.
     pub state: &'a mut State,
     /// Traces.
@@ -92,11 +90,11 @@ pub struct BlockRef<'a> {
     /// Block header.
     pub header: &'a Header,
     /// Block transactions.
-    pub transactions: &'a Vec<SignedTransaction>,
+    pub transactions: &'a [SignedTransaction],
     /// Block uncles.
-    pub uncles: &'a Vec<Header>,
+    pub uncles: &'a [Header],
     /// Transaction receipts.
-    pub receipts: &'a Vec<Receipt>,
+    pub receipts: &'a [Receipt],
     /// State.
     pub state: &'a State,
     /// Traces.
@@ -152,16 +150,16 @@ pub trait IsBlock {
     fn state(&self) -> &State { &self.block().state }

     /// Get all information on transactions in this block.
-    fn transactions(&self) -> &Vec<SignedTransaction> { &self.block().base.transactions }
+    fn transactions(&self) -> &[SignedTransaction] { &self.block().base.transactions }

     /// Get all information on receipts in this block.
-    fn receipts(&self) -> &Vec<Receipt> { &self.block().receipts }
+    fn receipts(&self) -> &[Receipt] { &self.block().receipts }

     /// Get all information concerning transaction tracing in this block.
     fn traces(&self) -> &Option<Vec<Trace>> { &self.block().traces }

     /// Get all uncles in this block.
-    fn uncles(&self) -> &Vec<Header> { &self.block().base.uncles }
+    fn uncles(&self) -> &[Header] { &self.block().base.uncles }
 }

 /// Trait for a object that has a state database.
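Note: switching these fields and getters from &Vec<T> to &[T] is what allows the allow(ptr_arg) lint exception at the top of the file to be dropped: a Vec<T> field still coerces to &[T] at the borrow site, and callers that only iterate or index are unaffected. A minimal sketch of that API shape (the Tx type is a placeholder, not the real SignedTransaction/Receipt):

    // Placeholder item type standing in for SignedTransaction / Receipt / Header.
    #[derive(Debug)]
    struct Tx(u64);

    struct Block {
        transactions: Vec<Tx>,
    }

    impl Block {
        // Returning &[Tx] instead of &Vec<Tx>: a &Vec<Tx> coerces to &[Tx],
        // and the signature no longer promises a growable container.
        fn transactions(&self) -> &[Tx] {
            &self.transactions
        }
    }

    fn total(txs: &[Tx]) -> u64 {
        txs.iter().map(|t| t.0).sum()
    }

    fn main() {
        let block = Block { transactions: vec![Tx(1), Tx(2), Tx(3)] };
        // Callers can pass the getter result or any other slice.
        assert_eq!(total(block.transactions()), 6);
        assert_eq!(total(&[Tx(10)]), 10);
    }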
@@ -104,7 +104,7 @@ struct VerifyingBlock {
 struct QueueSignal {
     deleting: Arc<AtomicBool>,
     signalled: AtomicBool,
-    message_channel: IoChannel<NetSyncMessage>,
+    message_channel: IoChannel<ClientIoMessage>,
 }

 impl QueueSignal {
@@ -116,7 +116,7 @@ impl QueueSignal {
         }

         if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
-            if let Err(e) = self.message_channel.send(UserMessage(SyncMessage::BlockVerified)) {
+            if let Err(e) = self.message_channel.send(ClientIoMessage::BlockVerified) {
                 debug!("Error sending BlockVerified message: {:?}", e);
             }
         }
@@ -137,7 +137,7 @@ struct Verification {

 impl BlockQueue {
     /// Creates a new queue instance.
-    pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
+    pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<ClientIoMessage>) -> BlockQueue {
         let verification = Arc::new(Verification {
             unverified: Mutex::new(VecDeque::new()),
             verified: Mutex::new(VecDeque::new()),
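Note: the queue now signals the client with its own ClientIoMessage enum instead of wrapping a sync-layer SyncMessage, so ethcore no longer has to know about the network message types. A rough, self-contained sketch of that decoupling using a plain std channel as a stand-in for parity's IoChannel (only the two enum variants visible in this diff are taken from the source; everything else is illustrative):

    use std::sync::mpsc;
    use std::thread;

    // Client-owned message type, as introduced by this change.
    #[derive(Debug)]
    enum ClientIoMessage {
        BlockVerified,
        NewTransactions(Vec<Vec<u8>>),
    }

    fn main() {
        // Stand-in for IoChannel<ClientIoMessage>: the verifier only needs a sender
        // for the client's enum, not any sync/network message type.
        let (tx, rx) = mpsc::channel::<ClientIoMessage>();

        let verifier = thread::spawn(move || {
            // A verification thread reports completion without knowing about networking.
            tx.send(ClientIoMessage::BlockVerified).expect("client receiver alive");
        });

        match rx.recv().expect("sender alive") {
            ClientIoMessage::BlockVerified => println!("client: import verified blocks"),
            ClientIoMessage::NewTransactions(txs) => println!("client: queue {} transactions", txs.len()),
        }

        verifier.join().unwrap();
    }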
ethcore/src/client/chain_notify.rs (new file, 40 lines)

@@ -0,0 +1,40 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use util::numbers::*;
+
+/// Represents what has to be handled by actor listening to chain events
+pub trait ChainNotify : Send + Sync {
+    /// fires when chain has new blocks
+    fn new_blocks(&self,
+        _imported: Vec<H256>,
+        _invalid: Vec<H256>,
+        _enacted: Vec<H256>,
+        _retracted: Vec<H256>,
+        _sealed: Vec<H256>) {
+        // does nothing by default
+    }
+
+    /// fires when chain achieves active mode
+    fn start(&self) {
+        // does nothing by default
+    }
+
+    /// fires when chain achieves passive mode
+    fn stop(&self) {
+        // does nothing by default
+    }
+}
@@ -14,13 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::path::PathBuf;
 use std::collections::{HashSet, HashMap};
 use std::ops::Deref;
 use std::mem;
 use std::collections::VecDeque;
 use std::sync::*;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use std::fmt;
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::time::Instant;
@@ -28,7 +27,6 @@ use std::time::Instant;
 // util
 use util::numbers::*;
 use util::panics::*;
-use util::network::*;
 use util::io::*;
 use util::rlp;
 use util::sha3::*;
@@ -47,7 +45,7 @@ use state::State;
 use spec::Spec;
 use engine::Engine;
 use views::HeaderView;
-use service::{NetSyncMessage, SyncMessage};
+use service::ClientIoMessage;
 use env_info::LastHashes;
 use verification;
 use verification::{PreverifiedBlock, Verifier};
@@ -60,7 +58,7 @@ use block_queue::{BlockQueue, BlockQueueInfo};
 use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
 use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig,
     DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient,
-    TraceFilter, CallAnalytics, BlockImportError, Mode};
+    TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify};
 use client::Error as ClientError;
 use env_info::EnvInfo;
 use executive::{Executive, Executed, TransactOptions, contract_address};
@@ -141,7 +139,8 @@ pub struct Client {
     miner: Arc<Miner>,
     sleep_state: Mutex<SleepState>,
     liveness: AtomicBool,
-    io_channel: IoChannel<NetSyncMessage>,
+    io_channel: IoChannel<ClientIoMessage>,
+    notify: RwLock<Option<Weak<ChainNotify>>>,
     queue_transactions: AtomicUsize,
     previous_enode: Mutex<Option<String>>,
 }
@@ -178,7 +177,7 @@ impl Client {
         spec: Spec,
         path: &Path,
         miner: Arc<Miner>,
-        message_channel: IoChannel<NetSyncMessage>
+        message_channel: IoChannel<ClientIoMessage>,
     ) -> Result<Arc<Client>, ClientError> {
         let path = get_db_path(path, config.pruning, spec.genesis_header().hash());
         let gb = spec.genesis_block();
@@ -228,12 +227,24 @@ impl Client {
             trie_factory: TrieFactory::new(config.trie_spec),
             miner: miner,
             io_channel: message_channel,
+            notify: RwLock::new(None),
             queue_transactions: AtomicUsize::new(0),
             previous_enode: Mutex::new(None),
         };
         Ok(Arc::new(client))
     }

+    /// Sets the actor to be notified on certain events
+    pub fn set_notify(&self, target: &Arc<ChainNotify>) {
+        let mut write_lock = self.notify.unwrapped_write();
+        *write_lock = Some(Arc::downgrade(target));
+    }
+
+    fn notify(&self) -> Option<Arc<ChainNotify>> {
+        let read_lock = self.notify.unwrapped_read();
+        read_lock.as_ref().and_then(|weak| weak.upgrade())
+    }
+
     /// Flush the block import queue.
     pub fn flush_queue(&self) {
         self.block_queue.flush();
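Note: the listener is stored as a Weak<ChainNotify> behind an RwLock, so the client can call back into whatever was registered via set_notify without keeping it alive; once the listener is dropped, upgrade() yields None and the notification is simply skipped. A self-contained sketch of that registration pattern using plain std types (the Informant listener name is made up for illustration; in Parity the listener would be whichever service drives sync or the UI):

    use std::sync::{Arc, RwLock, Weak};

    // Trimmed-down stand-in for the ChainNotify trait added in this commit.
    trait ChainNotify: Send + Sync {
        fn new_blocks(&self, imported: Vec<u64>) {
            let _ = imported; // does nothing by default
        }
    }

    struct Client {
        notify: RwLock<Option<Weak<dyn ChainNotify>>>,
    }

    impl Client {
        fn set_notify(&self, target: &Arc<dyn ChainNotify>) {
            *self.notify.write().unwrap() = Some(Arc::downgrade(target));
        }

        fn notify(&self) -> Option<Arc<dyn ChainNotify>> {
            self.notify.read().unwrap().as_ref().and_then(|weak| weak.upgrade())
        }

        fn on_import(&self, imported: Vec<u64>) {
            // Only notify if the listener is still alive.
            if let Some(listener) = self.notify() {
                listener.new_blocks(imported);
            }
        }
    }

    // Hypothetical listener: prints imported block numbers.
    struct Informant;
    impl ChainNotify for Informant {
        fn new_blocks(&self, imported: Vec<u64>) {
            println!("imported blocks: {:?}", imported);
        }
    }

    fn main() {
        let client = Client { notify: RwLock::new(None) };
        let informant: Arc<dyn ChainNotify> = Arc::new(Informant);
        client.set_notify(&informant);
        client.on_import(vec![1, 2, 3]); // delivered to the registered listener
        drop(informant);
        client.on_import(vec![4]); // listener gone: Weak::upgrade returns None, nothing happens
    }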
@@ -327,52 +338,54 @@ impl Client {
     }

     /// This is triggered by a message coming from a block queue when the block is ready for insertion
-    pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
+    pub fn import_verified_blocks(&self, io: &IoChannel<ClientIoMessage>) -> usize {
         let max_blocks_to_import = 64;
-        let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
-        let mut invalid_blocks = HashSet::new();
-        let mut import_results = Vec::with_capacity(max_blocks_to_import);
-
-        let _import_lock = self.import_lock.lock();
-        let _timer = PerfTimer::new("import_verified_blocks");
-        let blocks = self.block_queue.drain(max_blocks_to_import);
-
-        let original_best = self.chain_info().best_block_hash;
-
-        for block in blocks {
-            let header = &block.header;
-
-            if invalid_blocks.contains(&header.parent_hash) {
-                invalid_blocks.insert(header.hash());
-                continue;
-            }
-            let closed_block = self.check_and_close_block(&block);
-            if let Err(_) = closed_block {
-                invalid_blocks.insert(header.hash());
-                continue;
-            }
-            let closed_block = closed_block.unwrap();
-            imported_blocks.push(header.hash());
-
-            let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
-            import_results.push(route);
-
-            self.report.unwrapped_write().accrue_block(&block);
-            trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
-        }
-
-        let imported = imported_blocks.len();
-        let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();
-
-        {
-            if !invalid_blocks.is_empty() {
-                self.block_queue.mark_as_bad(&invalid_blocks);
-            }
-            if !imported_blocks.is_empty() {
-                self.block_queue.mark_as_good(&imported_blocks);
-            }
-        }
+        let (imported_blocks, import_results, invalid_blocks, original_best, imported) = {
+            let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
+            let mut invalid_blocks = HashSet::new();
+            let mut import_results = Vec::with_capacity(max_blocks_to_import);
+
+            let _import_lock = self.import_lock.lock();
+            let _timer = PerfTimer::new("import_verified_blocks");
+            let blocks = self.block_queue.drain(max_blocks_to_import);
+
+            let original_best = self.chain_info().best_block_hash;
+
+            for block in blocks {
+                let header = &block.header;
+
+                if invalid_blocks.contains(&header.parent_hash) {
+                    invalid_blocks.insert(header.hash());
+                    continue;
+                }
+                let closed_block = self.check_and_close_block(&block);
+                if let Err(_) = closed_block {
+                    invalid_blocks.insert(header.hash());
+                    continue;
+                }
+                let closed_block = closed_block.unwrap();
+                imported_blocks.push(header.hash());
+
+                let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
+                import_results.push(route);
+
+                self.report.unwrapped_write().accrue_block(&block);
+                trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
+            }
+
+            let imported = imported_blocks.len();
+            let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();
+
+            {
+                if !invalid_blocks.is_empty() {
+                    self.block_queue.mark_as_bad(&invalid_blocks);
+                }
+                if !imported_blocks.is_empty() {
+                    self.block_queue.mark_as_good(&imported_blocks);
+                }
+            }
+            (imported_blocks, import_results, invalid_blocks, original_best, imported)
+        };

         {
             if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
@@ -382,13 +395,15 @@ impl Client {
                 self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted);
             }

-            io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
-                imported: imported_blocks,
-                invalid: invalid_blocks,
-                enacted: enacted,
-                retracted: retracted,
-                sealed: Vec::new(),
-            })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
+            if let Some(notify) = self.notify() {
+                notify.new_blocks(
+                    imported_blocks,
+                    invalid_blocks,
+                    enacted,
+                    retracted,
+                    Vec::new(),
+                );
+            }
         }
     }
@@ -410,7 +425,7 @@ impl Client {
         };

         // Commit results
-        let receipts = block.receipts().clone();
+        let receipts = block.receipts().to_owned();
         let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));

         // CHECK! I *think* this is fine, even if the state_root is equal to another
@@ -566,7 +581,9 @@ impl Client {
     fn wake_up(&self) {
         if !self.liveness.load(AtomicOrdering::Relaxed) {
             self.liveness.store(true, AtomicOrdering::Relaxed);
-            self.io_channel.send(NetworkIoMessage::User(SyncMessage::StartNetwork)).unwrap();
+            if let Some(notify) = self.notify() {
+                notify.start();
+            }
             trace!(target: "mode", "wake_up: Waking.");
         }
     }
@@ -576,7 +593,9 @@ impl Client {
         // only sleep if the import queue is mostly empty.
         if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON {
             self.liveness.store(false, AtomicOrdering::Relaxed);
-            self.io_channel.send(NetworkIoMessage::User(SyncMessage::StopNetwork)).unwrap();
+            if let Some(notify) = self.notify() {
+                notify.stop();
+            }
             trace!(target: "mode", "sleep: Sleeping.");
         } else {
             trace!(target: "mode", "sleep: Cannot sleep - syncing ongoing.");
@@ -901,7 +920,7 @@ impl BlockChainClient for Client {
             debug!("Ignoring {} transactions: queue is full", transactions.len());
         } else {
             let len = transactions.len();
-            match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) {
+            match self.io_channel.send(ClientIoMessage::NewTransactions(transactions)) {
                 Ok(_) => {
                     self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst);
                 }
@@ -969,13 +988,15 @@ impl MiningBlockChainClient for Client {
             let (enacted, retracted) = self.calculate_enacted_retracted(&[route]);
             self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);

-            self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
-                imported: vec![h.clone()],
-                invalid: vec![],
-                enacted: enacted,
-                retracted: retracted,
-                sealed: vec![h.clone()],
-            })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
+            if let Some(notify) = self.notify() {
+                notify.new_blocks(
+                    vec![h.clone()],
+                    vec![],
+                    enacted,
+                    retracted,
+                    vec![h.clone()],
+                );
+            }
         }

         if self.chain_info().best_block_hash != original_best {
|
@ -20,6 +20,7 @@ mod config;
|
|||||||
mod error;
|
mod error;
|
||||||
mod test_client;
|
mod test_client;
|
||||||
mod trace;
|
mod trace;
|
||||||
|
mod chain_notify;
|
||||||
|
|
||||||
pub use self::client::*;
|
pub use self::client::*;
|
||||||
pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
|
pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
|
||||||
@ -29,6 +30,7 @@ pub use self::test_client::{TestBlockChainClient, EachBlockWith};
|
|||||||
pub use types::trace_filter::Filter as TraceFilter;
|
pub use types::trace_filter::Filter as TraceFilter;
|
||||||
pub use executive::{Executed, Executive, TransactOptions};
|
pub use executive::{Executed, Executive, TransactOptions};
|
||||||
pub use env_info::{LastHashes, EnvInfo};
|
pub use env_info::{LastHashes, EnvInfo};
|
||||||
|
pub use self::chain_notify::ChainNotify;
|
||||||
|
|
||||||
use util::bytes::Bytes;
|
use util::bytes::Bytes;
|
||||||
use util::hash::{Address, H256, H2048};
|
use util::hash::{Address, H256, H2048};
|
||||||
|
@ -230,6 +230,10 @@ pub enum Error {
|
|||||||
PowInvalid,
|
PowInvalid,
|
||||||
/// Error concerning TrieDBs
|
/// Error concerning TrieDBs
|
||||||
Trie(TrieError),
|
Trie(TrieError),
|
||||||
|
/// Io error.
|
||||||
|
Io(::std::io::Error),
|
||||||
|
/// Snappy error.
|
||||||
|
Snappy(::util::snappy::InvalidInput),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl fmt::Display for Error {
|
impl fmt::Display for Error {
|
||||||
@ -246,6 +250,8 @@ impl fmt::Display for Error {
|
|||||||
Error::PowHashInvalid => f.write_str("Invalid or out of date PoW hash."),
|
Error::PowHashInvalid => f.write_str("Invalid or out of date PoW hash."),
|
||||||
Error::PowInvalid => f.write_str("Invalid nonce or mishash"),
|
Error::PowInvalid => f.write_str("Invalid nonce or mishash"),
|
||||||
Error::Trie(ref err) => f.write_fmt(format_args!("{}", err)),
|
Error::Trie(ref err) => f.write_fmt(format_args!("{}", err)),
|
||||||
|
Error::Io(ref err) => f.write_fmt(format_args!("{}", err)),
|
||||||
|
Error::Snappy(ref err) => f.write_fmt(format_args!("{}", err)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -313,6 +319,18 @@ impl From<TrieError> for Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<::std::io::Error> for Error {
|
||||||
|
fn from(err: ::std::io::Error) -> Error {
|
||||||
|
Error::Io(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<::util::snappy::InvalidInput> for Error {
|
||||||
|
fn from(err: ::util::snappy::InvalidInput) -> Error {
|
||||||
|
Error::Snappy(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl From<BlockImportError> for Error {
|
impl From<BlockImportError> for Error {
|
||||||
fn from(err: BlockImportError) -> Error {
|
fn from(err: BlockImportError) -> Error {
|
||||||
match err {
|
match err {
|
||||||
|
@ -107,6 +107,9 @@ pub trait CostType: ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Out
|
|||||||
fn overflow_add(self, other: Self) -> (Self, bool);
|
fn overflow_add(self, other: Self) -> (Self, bool);
|
||||||
/// Multiple with overflow
|
/// Multiple with overflow
|
||||||
fn overflow_mul(self, other: Self) -> (Self, bool);
|
fn overflow_mul(self, other: Self) -> (Self, bool);
|
||||||
|
/// Single-step full multiplication and division: `self*other/div`
|
||||||
|
/// Should not overflow on intermediate steps
|
||||||
|
fn overflow_mul_div(self, other: Self, div: Self) -> (Self, bool);
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CostType for U256 {
|
impl CostType for U256 {
|
||||||
@ -129,6 +132,17 @@ impl CostType for U256 {
|
|||||||
fn overflow_mul(self, other: Self) -> (Self, bool) {
|
fn overflow_mul(self, other: Self) -> (Self, bool) {
|
||||||
Uint::overflowing_mul(self, other)
|
Uint::overflowing_mul(self, other)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn overflow_mul_div(self, other: Self, div: Self) -> (Self, bool) {
|
||||||
|
let x = self.full_mul(other);
|
||||||
|
let (U512(parts), o) = Uint::overflowing_div(x, U512::from(div));
|
||||||
|
let overflow = (parts[4] | parts[5] | parts[6] | parts[7]) > 0;
|
||||||
|
|
||||||
|
(
|
||||||
|
U256([parts[0], parts[1], parts[2], parts[3]]),
|
||||||
|
o | overflow
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CostType for usize {
|
impl CostType for usize {
|
||||||
@ -154,6 +168,14 @@ impl CostType for usize {
|
|||||||
fn overflow_mul(self, other: Self) -> (Self, bool) {
|
fn overflow_mul(self, other: Self) -> (Self, bool) {
|
||||||
self.overflowing_mul(other)
|
self.overflowing_mul(other)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn overflow_mul_div(self, other: Self, div: Self) -> (Self, bool) {
|
||||||
|
let (c, o) = U128::from(self).overflowing_mul(U128::from(other));
|
||||||
|
let (U128(parts), o1) = c.overflowing_div(U128::from(div));
|
||||||
|
let result = parts[0] as usize;
|
||||||
|
let overflow = o | o1 | (parts[1] > 0) | (parts[0] > result as u64);
|
||||||
|
(result, overflow)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Evm interface
|
/// Evm interface
|
||||||
@ -164,3 +186,41 @@ pub trait Evm {
|
|||||||
/// to compute the final gas left.
|
/// to compute the final gas left.
|
||||||
fn exec(&mut self, params: ActionParams, ext: &mut Ext) -> Result<GasLeft>;
|
fn exec(&mut self, params: ActionParams, ext: &mut Ext) -> Result<GasLeft>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_calculate_overflow_mul_div_without_overflow() {
|
||||||
|
// given
|
||||||
|
let num = 10_000_000;
|
||||||
|
|
||||||
|
// when
|
||||||
|
let (res1, o1) = U256::from(num).overflow_mul_div(U256::from(num), U256::from(num));
|
||||||
|
let (res2, o2) = num.overflow_mul_div(num, num);
|
||||||
|
|
||||||
|
// then
|
||||||
|
assert_eq!(res1, U256::from(num));
|
||||||
|
assert!(!o1);
|
||||||
|
assert_eq!(res2, num);
|
||||||
|
assert!(!o2);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_calculate_overflow_mul_div_with_overflow() {
|
||||||
|
// given
|
||||||
|
let max = ::std::u64::MAX;
|
||||||
|
let num1 = U256([max, max, max, max]);
|
||||||
|
let num2 = ::std::usize::MAX;
|
||||||
|
|
||||||
|
// when
|
||||||
|
let (res1, o1) = num1.overflow_mul_div(num1, num1 - U256::from(2));
|
||||||
|
let (res2, o2) = num2.overflow_mul_div(num2, num2 - 2);
|
||||||
|
|
||||||
|
// then
|
||||||
|
// (x+2)^2/x = (x^2 + 4x + 4)/x = x + 4 + 4/x ~ (MAX-2) + 4 + 0 = 1
|
||||||
|
assert_eq!(res2, 1);
|
||||||
|
assert!(o2);
|
||||||
|
|
||||||
|
assert_eq!(res1, U256::from(1));
|
||||||
|
assert!(o1);
|
||||||
|
}
|
||||||
|
|
||||||
|
@ -101,6 +101,12 @@ pub enum GasPriceTier {
|
|||||||
Invalid
|
Invalid
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Default for GasPriceTier {
|
||||||
|
fn default() -> Self {
|
||||||
|
GasPriceTier::Invalid
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns the index in schedule for specific `GasPriceTier`
|
/// Returns the index in schedule for specific `GasPriceTier`
|
||||||
pub fn get_tier_idx (tier: GasPriceTier) -> usize {
|
pub fn get_tier_idx (tier: GasPriceTier) -> usize {
|
||||||
match tier {
|
match tier {
|
||||||
@ -116,6 +122,7 @@ pub fn get_tier_idx (tier: GasPriceTier) -> usize {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Default)]
|
||||||
pub struct InstructionInfo {
|
pub struct InstructionInfo {
|
||||||
pub name: &'static str,
|
pub name: &'static str,
|
||||||
pub additional: usize,
|
pub additional: usize,
|
||||||
@ -126,7 +133,7 @@ pub struct InstructionInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl InstructionInfo {
|
impl InstructionInfo {
|
||||||
pub fn new(name: &'static str, additional: usize, args: usize, ret: usize, side_effects: bool, tier: GasPriceTier) -> InstructionInfo {
|
pub fn new(name: &'static str, additional: usize, args: usize, ret: usize, side_effects: bool, tier: GasPriceTier) -> Self {
|
||||||
InstructionInfo {
|
InstructionInfo {
|
||||||
name: name,
|
name: name,
|
||||||
additional: additional,
|
additional: additional,
|
||||||
@@ -138,142 +145,141 @@ impl InstructionInfo {
 	}
 }
 
-#[cfg_attr(rustfmt, rustfmt_skip)]
-/// Return details about specific instruction
-pub fn get_info(instruction: Instruction) -> InstructionInfo {
-	match instruction {
-		STOP => InstructionInfo::new("STOP", 0, 0, 0, true, GasPriceTier::Zero),
-		ADD => InstructionInfo::new("ADD", 0, 2, 1, false, GasPriceTier::VeryLow),
-		SUB => InstructionInfo::new("SUB", 0, 2, 1, false, GasPriceTier::VeryLow),
-		MUL => InstructionInfo::new("MUL", 0, 2, 1, false, GasPriceTier::Low),
-		DIV => InstructionInfo::new("DIV", 0, 2, 1, false, GasPriceTier::Low),
-		SDIV => InstructionInfo::new("SDIV", 0, 2, 1, false, GasPriceTier::Low),
-		MOD => InstructionInfo::new("MOD", 0, 2, 1, false, GasPriceTier::Low),
-		SMOD => InstructionInfo::new("SMOD", 0, 2, 1, false, GasPriceTier::Low),
-		EXP => InstructionInfo::new("EXP", 0, 2, 1, false, GasPriceTier::Special),
-		NOT => InstructionInfo::new("NOT", 0, 1, 1, false, GasPriceTier::VeryLow),
-		LT => InstructionInfo::new("LT", 0, 2, 1, false, GasPriceTier::VeryLow),
-		GT => InstructionInfo::new("GT", 0, 2, 1, false, GasPriceTier::VeryLow),
-		SLT => InstructionInfo::new("SLT", 0, 2, 1, false, GasPriceTier::VeryLow),
-		SGT => InstructionInfo::new("SGT", 0, 2, 1, false, GasPriceTier::VeryLow),
-		EQ => InstructionInfo::new("EQ", 0, 2, 1, false, GasPriceTier::VeryLow),
-		ISZERO => InstructionInfo::new("ISZERO", 0, 1, 1, false, GasPriceTier::VeryLow),
-		AND => InstructionInfo::new("AND", 0, 2, 1, false, GasPriceTier::VeryLow),
-		OR => InstructionInfo::new("OR", 0, 2, 1, false, GasPriceTier::VeryLow),
-		XOR => InstructionInfo::new("XOR", 0, 2, 1, false, GasPriceTier::VeryLow),
-		BYTE => InstructionInfo::new("BYTE", 0, 2, 1, false, GasPriceTier::VeryLow),
-		ADDMOD => InstructionInfo::new("ADDMOD", 0, 3, 1, false, GasPriceTier::Mid),
-		MULMOD => InstructionInfo::new("MULMOD", 0, 3, 1, false, GasPriceTier::Mid),
-		SIGNEXTEND => InstructionInfo::new("SIGNEXTEND", 0, 2, 1, false, GasPriceTier::Low),
-		SHA3 => InstructionInfo::new("SHA3", 0, 2, 1, false, GasPriceTier::Special),
-		ADDRESS => InstructionInfo::new("ADDRESS", 0, 0, 1, false, GasPriceTier::Base),
-		BALANCE => InstructionInfo::new("BALANCE", 0, 1, 1, false, GasPriceTier::Ext),
-		ORIGIN => InstructionInfo::new("ORIGIN", 0, 0, 1, false, GasPriceTier::Base),
-		CALLER => InstructionInfo::new("CALLER", 0, 0, 1, false, GasPriceTier::Base),
-		CALLVALUE => InstructionInfo::new("CALLVALUE", 0, 0, 1, false, GasPriceTier::Base),
-		CALLDATALOAD => InstructionInfo::new("CALLDATALOAD", 0, 1, 1, false, GasPriceTier::VeryLow),
-		CALLDATASIZE => InstructionInfo::new("CALLDATASIZE", 0, 0, 1, false, GasPriceTier::Base),
-		CALLDATACOPY => InstructionInfo::new("CALLDATACOPY", 0, 3, 0, true, GasPriceTier::VeryLow),
-		CODESIZE => InstructionInfo::new("CODESIZE", 0, 0, 1, false, GasPriceTier::Base),
-		CODECOPY => InstructionInfo::new("CODECOPY", 0, 3, 0, true, GasPriceTier::VeryLow),
-		GASPRICE => InstructionInfo::new("GASPRICE", 0, 0, 1, false, GasPriceTier::Base),
-		EXTCODESIZE => InstructionInfo::new("EXTCODESIZE", 0, 1, 1, false, GasPriceTier::Ext),
-		EXTCODECOPY => InstructionInfo::new("EXTCODECOPY", 0, 4, 0, true, GasPriceTier::Ext),
-		BLOCKHASH => InstructionInfo::new("BLOCKHASH", 0, 1, 1, false, GasPriceTier::Ext),
-		COINBASE => InstructionInfo::new("COINBASE", 0, 0, 1, false, GasPriceTier::Base),
-		TIMESTAMP => InstructionInfo::new("TIMESTAMP", 0, 0, 1, false, GasPriceTier::Base),
-		NUMBER => InstructionInfo::new("NUMBER", 0, 0, 1, false, GasPriceTier::Base),
-		DIFFICULTY => InstructionInfo::new("DIFFICULTY", 0, 0, 1, false, GasPriceTier::Base),
-		GASLIMIT => InstructionInfo::new("GASLIMIT", 0, 0, 1, false, GasPriceTier::Base),
-		POP => InstructionInfo::new("POP", 0, 1, 0, false, GasPriceTier::Base),
-		MLOAD => InstructionInfo::new("MLOAD", 0, 1, 1, false, GasPriceTier::VeryLow),
-		MSTORE => InstructionInfo::new("MSTORE", 0, 2, 0, true, GasPriceTier::VeryLow),
-		MSTORE8 => InstructionInfo::new("MSTORE8", 0, 2, 0, true, GasPriceTier::VeryLow),
-		SLOAD => InstructionInfo::new("SLOAD", 0, 1, 1, false, GasPriceTier::Special),
-		SSTORE => InstructionInfo::new("SSTORE", 0, 2, 0, true, GasPriceTier::Special),
-		JUMP => InstructionInfo::new("JUMP", 0, 1, 0, true, GasPriceTier::Mid),
-		JUMPI => InstructionInfo::new("JUMPI", 0, 2, 0, true, GasPriceTier::High),
-		PC => InstructionInfo::new("PC", 0, 0, 1, false, GasPriceTier::Base),
-		MSIZE => InstructionInfo::new("MSIZE", 0, 0, 1, false, GasPriceTier::Base),
-		GAS => InstructionInfo::new("GAS", 0, 0, 1, false, GasPriceTier::Base),
-		JUMPDEST => InstructionInfo::new("JUMPDEST", 0, 0, 0, true, GasPriceTier::Special),
-		PUSH1 => InstructionInfo::new("PUSH1", 1, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH2 => InstructionInfo::new("PUSH2", 2, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH3 => InstructionInfo::new("PUSH3", 3, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH4 => InstructionInfo::new("PUSH4", 4, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH5 => InstructionInfo::new("PUSH5", 5, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH6 => InstructionInfo::new("PUSH6", 6, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH7 => InstructionInfo::new("PUSH7", 7, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH8 => InstructionInfo::new("PUSH8", 8, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH9 => InstructionInfo::new("PUSH9", 9, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH10 => InstructionInfo::new("PUSH10", 10, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH11 => InstructionInfo::new("PUSH11", 11, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH12 => InstructionInfo::new("PUSH12", 12, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH13 => InstructionInfo::new("PUSH13", 13, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH14 => InstructionInfo::new("PUSH14", 14, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH15 => InstructionInfo::new("PUSH15", 15, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH16 => InstructionInfo::new("PUSH16", 16, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH17 => InstructionInfo::new("PUSH17", 17, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH18 => InstructionInfo::new("PUSH18", 18, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH19 => InstructionInfo::new("PUSH19", 19, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH20 => InstructionInfo::new("PUSH20", 20, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH21 => InstructionInfo::new("PUSH21", 21, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH22 => InstructionInfo::new("PUSH22", 22, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH23 => InstructionInfo::new("PUSH23", 23, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH24 => InstructionInfo::new("PUSH24", 24, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH25 => InstructionInfo::new("PUSH25", 25, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH26 => InstructionInfo::new("PUSH26", 26, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH27 => InstructionInfo::new("PUSH27", 27, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH28 => InstructionInfo::new("PUSH28", 28, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH29 => InstructionInfo::new("PUSH29", 29, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH30 => InstructionInfo::new("PUSH30", 30, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH31 => InstructionInfo::new("PUSH31", 31, 0, 1, false, GasPriceTier::VeryLow),
-		PUSH32 => InstructionInfo::new("PUSH32", 32, 0, 1, false, GasPriceTier::VeryLow),
-		DUP1 => InstructionInfo::new("DUP1", 0, 1, 2, false, GasPriceTier::VeryLow),
-		DUP2 => InstructionInfo::new("DUP2", 0, 2, 3, false, GasPriceTier::VeryLow),
-		DUP3 => InstructionInfo::new("DUP3", 0, 3, 4, false, GasPriceTier::VeryLow),
-		DUP4 => InstructionInfo::new("DUP4", 0, 4, 5, false, GasPriceTier::VeryLow),
-		DUP5 => InstructionInfo::new("DUP5", 0, 5, 6, false, GasPriceTier::VeryLow),
-		DUP6 => InstructionInfo::new("DUP6", 0, 6, 7, false, GasPriceTier::VeryLow),
-		DUP7 => InstructionInfo::new("DUP7", 0, 7, 8, false, GasPriceTier::VeryLow),
-		DUP8 => InstructionInfo::new("DUP8", 0, 8, 9, false, GasPriceTier::VeryLow),
-		DUP9 => InstructionInfo::new("DUP9", 0, 9, 10, false, GasPriceTier::VeryLow),
-		DUP10 => InstructionInfo::new("DUP10", 0, 10, 11, false, GasPriceTier::VeryLow),
-		DUP11 => InstructionInfo::new("DUP11", 0, 11, 12, false, GasPriceTier::VeryLow),
-		DUP12 => InstructionInfo::new("DUP12", 0, 12, 13, false, GasPriceTier::VeryLow),
-		DUP13 => InstructionInfo::new("DUP13", 0, 13, 14, false, GasPriceTier::VeryLow),
-		DUP14 => InstructionInfo::new("DUP14", 0, 14, 15, false, GasPriceTier::VeryLow),
-		DUP15 => InstructionInfo::new("DUP15", 0, 15, 16, false, GasPriceTier::VeryLow),
-		DUP16 => InstructionInfo::new("DUP16", 0, 16, 17, false, GasPriceTier::VeryLow),
-		SWAP1 => InstructionInfo::new("SWAP1", 0, 2, 2, false, GasPriceTier::VeryLow),
-		SWAP2 => InstructionInfo::new("SWAP2", 0, 3, 3, false, GasPriceTier::VeryLow),
-		SWAP3 => InstructionInfo::new("SWAP3", 0, 4, 4, false, GasPriceTier::VeryLow),
-		SWAP4 => InstructionInfo::new("SWAP4", 0, 5, 5, false, GasPriceTier::VeryLow),
-		SWAP5 => InstructionInfo::new("SWAP5", 0, 6, 6, false, GasPriceTier::VeryLow),
-		SWAP6 => InstructionInfo::new("SWAP6", 0, 7, 7, false, GasPriceTier::VeryLow),
-		SWAP7 => InstructionInfo::new("SWAP7", 0, 8, 8, false, GasPriceTier::VeryLow),
-		SWAP8 => InstructionInfo::new("SWAP8", 0, 9, 9, false, GasPriceTier::VeryLow),
-		SWAP9 => InstructionInfo::new("SWAP9", 0, 10, 10, false, GasPriceTier::VeryLow),
-		SWAP10 => InstructionInfo::new("SWAP10", 0, 11, 11, false, GasPriceTier::VeryLow),
-		SWAP11 => InstructionInfo::new("SWAP11", 0, 12, 12, false, GasPriceTier::VeryLow),
-		SWAP12 => InstructionInfo::new("SWAP12", 0, 13, 13, false, GasPriceTier::VeryLow),
-		SWAP13 => InstructionInfo::new("SWAP13", 0, 14, 14, false, GasPriceTier::VeryLow),
-		SWAP14 => InstructionInfo::new("SWAP14", 0, 15, 15, false, GasPriceTier::VeryLow),
-		SWAP15 => InstructionInfo::new("SWAP15", 0, 16, 16, false, GasPriceTier::VeryLow),
-		SWAP16 => InstructionInfo::new("SWAP16", 0, 17, 17, false, GasPriceTier::VeryLow),
-		LOG0 => InstructionInfo::new("LOG0", 0, 2, 0, true, GasPriceTier::Special),
-		LOG1 => InstructionInfo::new("LOG1", 0, 3, 0, true, GasPriceTier::Special),
-		LOG2 => InstructionInfo::new("LOG2", 0, 4, 0, true, GasPriceTier::Special),
-		LOG3 => InstructionInfo::new("LOG3", 0, 5, 0, true, GasPriceTier::Special),
-		LOG4 => InstructionInfo::new("LOG4", 0, 6, 0, true, GasPriceTier::Special),
-		CREATE => InstructionInfo::new("CREATE", 0, 3, 1, true, GasPriceTier::Special),
-		CALL => InstructionInfo::new("CALL", 0, 7, 1, true, GasPriceTier::Special),
-		CALLCODE => InstructionInfo::new("CALLCODE", 0, 7, 1, true, GasPriceTier::Special),
-		RETURN => InstructionInfo::new("RETURN", 0, 2, 0, true, GasPriceTier::Zero),
-		DELEGATECALL => InstructionInfo::new("DELEGATECALL", 0, 6, 1, true, GasPriceTier::Special),
-		SUICIDE => InstructionInfo::new("SUICIDE", 0, 1, 0, true, GasPriceTier::Zero),
-		_ => InstructionInfo::new("INVALID_INSTRUCTION", 0, 0, 0, false, GasPriceTier::Invalid)
-	}
-}
+lazy_static! {
+	pub static ref INSTRUCTIONS: [InstructionInfo; 0x100] = {
+		let mut arr = [InstructionInfo::default(); 0x100];
+		arr[STOP as usize] = InstructionInfo::new("STOP", 0, 0, 0, true, GasPriceTier::Zero);
+		arr[ADD as usize] = InstructionInfo::new("ADD", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[SUB as usize] = InstructionInfo::new("SUB", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[MUL as usize] = InstructionInfo::new("MUL", 0, 2, 1, false, GasPriceTier::Low);
+		arr[DIV as usize] = InstructionInfo::new("DIV", 0, 2, 1, false, GasPriceTier::Low);
+		arr[SDIV as usize] = InstructionInfo::new("SDIV", 0, 2, 1, false, GasPriceTier::Low);
+		arr[MOD as usize] = InstructionInfo::new("MOD", 0, 2, 1, false, GasPriceTier::Low);
+		arr[SMOD as usize] = InstructionInfo::new("SMOD", 0, 2, 1, false, GasPriceTier::Low);
+		arr[EXP as usize] = InstructionInfo::new("EXP", 0, 2, 1, false, GasPriceTier::Special);
+		arr[NOT as usize] = InstructionInfo::new("NOT", 0, 1, 1, false, GasPriceTier::VeryLow);
+		arr[LT as usize] = InstructionInfo::new("LT", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[GT as usize] = InstructionInfo::new("GT", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[SLT as usize] = InstructionInfo::new("SLT", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[SGT as usize] = InstructionInfo::new("SGT", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[EQ as usize] = InstructionInfo::new("EQ", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[ISZERO as usize] = InstructionInfo::new("ISZERO", 0, 1, 1, false, GasPriceTier::VeryLow);
+		arr[AND as usize] = InstructionInfo::new("AND", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[OR as usize] = InstructionInfo::new("OR", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[XOR as usize] = InstructionInfo::new("XOR", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[BYTE as usize] = InstructionInfo::new("BYTE", 0, 2, 1, false, GasPriceTier::VeryLow);
+		arr[ADDMOD as usize] = InstructionInfo::new("ADDMOD", 0, 3, 1, false, GasPriceTier::Mid);
+		arr[MULMOD as usize] = InstructionInfo::new("MULMOD", 0, 3, 1, false, GasPriceTier::Mid);
+		arr[SIGNEXTEND as usize] = InstructionInfo::new("SIGNEXTEND", 0, 2, 1, false, GasPriceTier::Low);
+		arr[SHA3 as usize] = InstructionInfo::new("SHA3", 0, 2, 1, false, GasPriceTier::Special);
+		arr[ADDRESS as usize] = InstructionInfo::new("ADDRESS", 0, 0, 1, false, GasPriceTier::Base);
+		arr[BALANCE as usize] = InstructionInfo::new("BALANCE", 0, 1, 1, false, GasPriceTier::Ext);
+		arr[ORIGIN as usize] = InstructionInfo::new("ORIGIN", 0, 0, 1, false, GasPriceTier::Base);
+		arr[CALLER as usize] = InstructionInfo::new("CALLER", 0, 0, 1, false, GasPriceTier::Base);
+		arr[CALLVALUE as usize] = InstructionInfo::new("CALLVALUE", 0, 0, 1, false, GasPriceTier::Base);
+		arr[CALLDATALOAD as usize] = InstructionInfo::new("CALLDATALOAD", 0, 1, 1, false, GasPriceTier::VeryLow);
+		arr[CALLDATASIZE as usize] = InstructionInfo::new("CALLDATASIZE", 0, 0, 1, false, GasPriceTier::Base);
+		arr[CALLDATACOPY as usize] = InstructionInfo::new("CALLDATACOPY", 0, 3, 0, true, GasPriceTier::VeryLow);
+		arr[CODESIZE as usize] = InstructionInfo::new("CODESIZE", 0, 0, 1, false, GasPriceTier::Base);
+		arr[CODECOPY as usize] = InstructionInfo::new("CODECOPY", 0, 3, 0, true, GasPriceTier::VeryLow);
+		arr[GASPRICE as usize] = InstructionInfo::new("GASPRICE", 0, 0, 1, false, GasPriceTier::Base);
+		arr[EXTCODESIZE as usize] = InstructionInfo::new("EXTCODESIZE", 0, 1, 1, false, GasPriceTier::Ext);
+		arr[EXTCODECOPY as usize] = InstructionInfo::new("EXTCODECOPY", 0, 4, 0, true, GasPriceTier::Ext);
+		arr[BLOCKHASH as usize] = InstructionInfo::new("BLOCKHASH", 0, 1, 1, false, GasPriceTier::Ext);
+		arr[COINBASE as usize] = InstructionInfo::new("COINBASE", 0, 0, 1, false, GasPriceTier::Base);
+		arr[TIMESTAMP as usize] = InstructionInfo::new("TIMESTAMP", 0, 0, 1, false, GasPriceTier::Base);
+		arr[NUMBER as usize] = InstructionInfo::new("NUMBER", 0, 0, 1, false, GasPriceTier::Base);
+		arr[DIFFICULTY as usize] = InstructionInfo::new("DIFFICULTY", 0, 0, 1, false, GasPriceTier::Base);
+		arr[GASLIMIT as usize] = InstructionInfo::new("GASLIMIT", 0, 0, 1, false, GasPriceTier::Base);
+		arr[POP as usize] = InstructionInfo::new("POP", 0, 1, 0, false, GasPriceTier::Base);
+		arr[MLOAD as usize] = InstructionInfo::new("MLOAD", 0, 1, 1, false, GasPriceTier::VeryLow);
+		arr[MSTORE as usize] = InstructionInfo::new("MSTORE", 0, 2, 0, true, GasPriceTier::VeryLow);
+		arr[MSTORE8 as usize] = InstructionInfo::new("MSTORE8", 0, 2, 0, true, GasPriceTier::VeryLow);
+		arr[SLOAD as usize] = InstructionInfo::new("SLOAD", 0, 1, 1, false, GasPriceTier::Special);
+		arr[SSTORE as usize] = InstructionInfo::new("SSTORE", 0, 2, 0, true, GasPriceTier::Special);
+		arr[JUMP as usize] = InstructionInfo::new("JUMP", 0, 1, 0, true, GasPriceTier::Mid);
+		arr[JUMPI as usize] = InstructionInfo::new("JUMPI", 0, 2, 0, true, GasPriceTier::High);
+		arr[PC as usize] = InstructionInfo::new("PC", 0, 0, 1, false, GasPriceTier::Base);
+		arr[MSIZE as usize] = InstructionInfo::new("MSIZE", 0, 0, 1, false, GasPriceTier::Base);
+		arr[GAS as usize] = InstructionInfo::new("GAS", 0, 0, 1, false, GasPriceTier::Base);
+		arr[JUMPDEST as usize] = InstructionInfo::new("JUMPDEST", 0, 0, 0, true, GasPriceTier::Special);
+		arr[PUSH1 as usize] = InstructionInfo::new("PUSH1", 1, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH2 as usize] = InstructionInfo::new("PUSH2", 2, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH3 as usize] = InstructionInfo::new("PUSH3", 3, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH4 as usize] = InstructionInfo::new("PUSH4", 4, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH5 as usize] = InstructionInfo::new("PUSH5", 5, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH6 as usize] = InstructionInfo::new("PUSH6", 6, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH7 as usize] = InstructionInfo::new("PUSH7", 7, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH8 as usize] = InstructionInfo::new("PUSH8", 8, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH9 as usize] = InstructionInfo::new("PUSH9", 9, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH10 as usize] = InstructionInfo::new("PUSH10", 10, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH11 as usize] = InstructionInfo::new("PUSH11", 11, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH12 as usize] = InstructionInfo::new("PUSH12", 12, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH13 as usize] = InstructionInfo::new("PUSH13", 13, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH14 as usize] = InstructionInfo::new("PUSH14", 14, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH15 as usize] = InstructionInfo::new("PUSH15", 15, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH16 as usize] = InstructionInfo::new("PUSH16", 16, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH17 as usize] = InstructionInfo::new("PUSH17", 17, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH18 as usize] = InstructionInfo::new("PUSH18", 18, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH19 as usize] = InstructionInfo::new("PUSH19", 19, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH20 as usize] = InstructionInfo::new("PUSH20", 20, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH21 as usize] = InstructionInfo::new("PUSH21", 21, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH22 as usize] = InstructionInfo::new("PUSH22", 22, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH23 as usize] = InstructionInfo::new("PUSH23", 23, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH24 as usize] = InstructionInfo::new("PUSH24", 24, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH25 as usize] = InstructionInfo::new("PUSH25", 25, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH26 as usize] = InstructionInfo::new("PUSH26", 26, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH27 as usize] = InstructionInfo::new("PUSH27", 27, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH28 as usize] = InstructionInfo::new("PUSH28", 28, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH29 as usize] = InstructionInfo::new("PUSH29", 29, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH30 as usize] = InstructionInfo::new("PUSH30", 30, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH31 as usize] = InstructionInfo::new("PUSH31", 31, 0, 1, false, GasPriceTier::VeryLow);
+		arr[PUSH32 as usize] = InstructionInfo::new("PUSH32", 32, 0, 1, false, GasPriceTier::VeryLow);
+		arr[DUP1 as usize] = InstructionInfo::new("DUP1", 0, 1, 2, false, GasPriceTier::VeryLow);
+		arr[DUP2 as usize] = InstructionInfo::new("DUP2", 0, 2, 3, false, GasPriceTier::VeryLow);
+		arr[DUP3 as usize] = InstructionInfo::new("DUP3", 0, 3, 4, false, GasPriceTier::VeryLow);
+		arr[DUP4 as usize] = InstructionInfo::new("DUP4", 0, 4, 5, false, GasPriceTier::VeryLow);
+		arr[DUP5 as usize] = InstructionInfo::new("DUP5", 0, 5, 6, false, GasPriceTier::VeryLow);
+		arr[DUP6 as usize] = InstructionInfo::new("DUP6", 0, 6, 7, false, GasPriceTier::VeryLow);
+		arr[DUP7 as usize] = InstructionInfo::new("DUP7", 0, 7, 8, false, GasPriceTier::VeryLow);
+		arr[DUP8 as usize] = InstructionInfo::new("DUP8", 0, 8, 9, false, GasPriceTier::VeryLow);
+		arr[DUP9 as usize] = InstructionInfo::new("DUP9", 0, 9, 10, false, GasPriceTier::VeryLow);
+		arr[DUP10 as usize] = InstructionInfo::new("DUP10", 0, 10, 11, false, GasPriceTier::VeryLow);
+		arr[DUP11 as usize] = InstructionInfo::new("DUP11", 0, 11, 12, false, GasPriceTier::VeryLow);
+		arr[DUP12 as usize] = InstructionInfo::new("DUP12", 0, 12, 13, false, GasPriceTier::VeryLow);
+		arr[DUP13 as usize] = InstructionInfo::new("DUP13", 0, 13, 14, false, GasPriceTier::VeryLow);
+		arr[DUP14 as usize] = InstructionInfo::new("DUP14", 0, 14, 15, false, GasPriceTier::VeryLow);
+		arr[DUP15 as usize] = InstructionInfo::new("DUP15", 0, 15, 16, false, GasPriceTier::VeryLow);
+		arr[DUP16 as usize] = InstructionInfo::new("DUP16", 0, 16, 17, false, GasPriceTier::VeryLow);
+		arr[SWAP1 as usize] = InstructionInfo::new("SWAP1", 0, 2, 2, false, GasPriceTier::VeryLow);
+		arr[SWAP2 as usize] = InstructionInfo::new("SWAP2", 0, 3, 3, false, GasPriceTier::VeryLow);
+		arr[SWAP3 as usize] = InstructionInfo::new("SWAP3", 0, 4, 4, false, GasPriceTier::VeryLow);
+		arr[SWAP4 as usize] = InstructionInfo::new("SWAP4", 0, 5, 5, false, GasPriceTier::VeryLow);
+		arr[SWAP5 as usize] = InstructionInfo::new("SWAP5", 0, 6, 6, false, GasPriceTier::VeryLow);
+		arr[SWAP6 as usize] = InstructionInfo::new("SWAP6", 0, 7, 7, false, GasPriceTier::VeryLow);
+		arr[SWAP7 as usize] = InstructionInfo::new("SWAP7", 0, 8, 8, false, GasPriceTier::VeryLow);
+		arr[SWAP8 as usize] = InstructionInfo::new("SWAP8", 0, 9, 9, false, GasPriceTier::VeryLow);
+		arr[SWAP9 as usize] = InstructionInfo::new("SWAP9", 0, 10, 10, false, GasPriceTier::VeryLow);
+		arr[SWAP10 as usize] = InstructionInfo::new("SWAP10", 0, 11, 11, false, GasPriceTier::VeryLow);
+		arr[SWAP11 as usize] = InstructionInfo::new("SWAP11", 0, 12, 12, false, GasPriceTier::VeryLow);
+		arr[SWAP12 as usize] = InstructionInfo::new("SWAP12", 0, 13, 13, false, GasPriceTier::VeryLow);
+		arr[SWAP13 as usize] = InstructionInfo::new("SWAP13", 0, 14, 14, false, GasPriceTier::VeryLow);
+		arr[SWAP14 as usize] = InstructionInfo::new("SWAP14", 0, 15, 15, false, GasPriceTier::VeryLow);
+		arr[SWAP15 as usize] = InstructionInfo::new("SWAP15", 0, 16, 16, false, GasPriceTier::VeryLow);
+		arr[SWAP16 as usize] = InstructionInfo::new("SWAP16", 0, 17, 17, false, GasPriceTier::VeryLow);
+		arr[LOG0 as usize] = InstructionInfo::new("LOG0", 0, 2, 0, true, GasPriceTier::Special);
+		arr[LOG1 as usize] = InstructionInfo::new("LOG1", 0, 3, 0, true, GasPriceTier::Special);
+		arr[LOG2 as usize] = InstructionInfo::new("LOG2", 0, 4, 0, true, GasPriceTier::Special);
+		arr[LOG3 as usize] = InstructionInfo::new("LOG3", 0, 5, 0, true, GasPriceTier::Special);
+		arr[LOG4 as usize] = InstructionInfo::new("LOG4", 0, 6, 0, true, GasPriceTier::Special);
+		arr[CREATE as usize] = InstructionInfo::new("CREATE", 0, 3, 1, true, GasPriceTier::Special);
+		arr[CALL as usize] = InstructionInfo::new("CALL", 0, 7, 1, true, GasPriceTier::Special);
+		arr[CALLCODE as usize] = InstructionInfo::new("CALLCODE", 0, 7, 1, true, GasPriceTier::Special);
+		arr[RETURN as usize] = InstructionInfo::new("RETURN", 0, 2, 0, true, GasPriceTier::Zero);
+		arr[DELEGATECALL as usize] = InstructionInfo::new("DELEGATECALL", 0, 6, 1, true, GasPriceTier::Special);
+		arr[SUICIDE as usize] = InstructionInfo::new("SUICIDE", 0, 1, 0, true, GasPriceTier::Zero);
+		arr
+	};
+}
 
 /// Virtual machine bytecode instruction.
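Aside (not part of the commit, names are made up): the hunk above is a jump-table refactor — build a 256-entry info array once and index it with the opcode byte, instead of walking a large match on every step. A minimal self-contained Rust sketch of the idea, with only two opcodes filled in; the real table also records gas tier and side-effect flags.

// Illustrative opcode-info table, not Parity's types.
#[derive(Clone, Copy, Default)]
struct OpInfo { args: usize, ret: usize }

fn build_table() -> [OpInfo; 0x100] {
	let mut arr = [OpInfo::default(); 0x100];
	arr[0x01] = OpInfo { args: 2, ret: 1 }; // ADD
	arr[0x02] = OpInfo { args: 2, ret: 1 }; // MUL
	arr // every other opcode keeps the Default entry
}

fn main() {
	let table = build_table();
	let op: u8 = 0x01;
	let info = table[op as usize]; // O(1) lookup per executed instruction
	println!("args = {}, ret = {}", info.args, info.ret);
}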
@@ -68,6 +68,9 @@ impl<Gas: CostType> Gasometer<Gas> {
 		let default_gas = Gas::from(schedule.tier_step_gas[tier]);
 
 		let cost = match instruction {
+			instructions::JUMPDEST => {
+				InstructionCost::Gas(Gas::from(1))
+			},
 			instructions::SSTORE => {
 				let address = H256::from(stack.peek(0));
 				let newval = stack.peek(1);
@@ -106,9 +109,6 @@ impl<Gas: CostType> Gasometer<Gas> {
 			instructions::EXTCODECOPY => {
 				InstructionCost::GasMemCopy(default_gas, try!(self.mem_needed(stack.peek(1), stack.peek(3))), try!(Gas::from_u256(*stack.peek(3))))
 			},
-			instructions::JUMPDEST => {
-				InstructionCost::Gas(Gas::from(1))
-			},
 			instructions::LOG0...instructions::LOG4 => {
 				let no_of_topics = instructions::get_log_topics(instruction);
 				let log_gas = schedule.log_gas + schedule.log_topic_gas * no_of_topics;
@@ -199,14 +199,12 @@ impl<Gas: CostType> Gasometer<Gas> {
 			let s = mem_size >> 5;
 			// s * memory_gas + s * s / quad_coeff_div
 			let a = overflowing!(s.overflow_mul(Gas::from(schedule.memory_gas)));
-			// We need to go to U512 to calculate s*s/quad_coeff_div
-			let b = U512::from(s.as_u256()) * U512::from(s.as_u256()) / U512::from(schedule.quad_coeff_div);
-			if b > U512::from(!U256::zero()) {
-				Err(evm::Error::OutOfGas)
-			} else {
-				Ok(overflowing!(a.overflow_add(try!(Gas::from_u256(U256::from(b))))))
-			}
+			// Calculate s*s/quad_coeff_div
+			let b = overflowing!(s.overflow_mul_div(s, Gas::from(schedule.quad_coeff_div)));
+			Ok(overflowing!(a.overflow_add(b)))
 		};
 
 		let current_mem_size = Gas::from(current_mem_size);
 		let req_mem_size_rounded = (overflowing!(mem_size.overflow_add(Gas::from(31 as usize))) >> 5) << 5;
 
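Aside (illustrative, not Parity's Gas types): the replacement keeps the same formula, gas = s * memory_gas + s * s / quad_coeff_div with s the memory size in 32-byte words, but avoids widening to U512 by doing the multiply-then-divide in one guarded step. A standalone sketch of that arithmetic with explicit overflow checks:

// Quadratic memory gas with assumed u64 schedule constants; None plays the role of OutOfGas.
// quad_coeff_div is assumed non-zero, as in the real schedule.
fn mem_gas_cost(mem_size: u64, memory_gas: u64, quad_coeff_div: u64) -> Option<u64> {
	let s = mem_size.checked_add(31)? / 32; // size in words, rounded up
	let linear = s.checked_mul(memory_gas)?;
	// Widen only for the s * s product, mirroring the overflow_mul_div idea.
	let quad = (s as u128).checked_mul(s as u128)? / quad_coeff_div as u128;
	if quad > u64::MAX as u128 {
		return None;
	}
	linear.checked_add(quad as u64)
}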
@@ -104,12 +104,13 @@ impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 			position: 0,
 			code: &code
 		};
+		let infos = &*instructions::INSTRUCTIONS;
 
 		while reader.position < code.len() {
 			let instruction = code[reader.position];
 			reader.position += 1;
 
-			let info = instructions::get_info(instruction);
+			let info = infos[instruction as usize];
 			try!(self.verify_instruction(ext, instruction, &info, &stack));
 
 			// Calculate gas cost
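Aside: binding `let infos = &*instructions::INSTRUCTIONS;` before the loop means the lazy static is dereferenced once rather than once per executed instruction. A rough standalone equivalent of that hoist using std's OnceLock, with illustrative values:

use std::sync::OnceLock;

// A lazily-built 256-entry table, standing in for instructions::INSTRUCTIONS.
static BASE_GAS: OnceLock<[u64; 0x100]> = OnceLock::new();

fn base_gas_table() -> &'static [u64; 0x100] {
	BASE_GAS.get_or_init(|| {
		let mut arr = [0u64; 0x100];
		arr[0x01] = 3; // ADD: "very low" tier (illustrative value)
		arr[0x02] = 5; // MUL: "low" tier (illustrative value)
		arr
	})
}

fn total_base_gas(code: &[u8]) -> u64 {
	// Resolve the lazy static once, before the hot loop, as the diff does with `infos`.
	let table = base_gas_table();
	code.iter().map(|&op| table[op as usize]).sum()
}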
@@ -35,4 +35,3 @@ pub use self::evm::{Evm, Error, Finalize, GasLeft, Result, CostType};
 pub use self::ext::{Ext, ContractCreateResult, MessageCallResult};
 pub use self::factory::{Factory, VMType};
 pub use self::schedule::Schedule;
-pub use self::instructions::get_info;
@@ -143,12 +143,22 @@ impl Header {
 	/// Get the difficulty field of the header.
 	pub fn difficulty(&self) -> &U256 { &self.difficulty }
 	/// Get the seal field of the header.
-	pub fn seal(&self) -> &Vec<Bytes> { &self.seal }
+	pub fn seal(&self) -> &[Bytes] { &self.seal }
 
 	// TODO: seal_at, set_seal_at &c.
 
 	/// Set the number field of the header.
 	pub fn set_parent_hash(&mut self, a: H256) { self.parent_hash = a; self.note_dirty(); }
+	/// Set the uncles hash field of the header.
+	pub fn set_uncles_hash(&mut self, a: H256) { self.uncles_hash = a; self.note_dirty(); }
+	/// Set the state root field of the header.
+	pub fn set_state_root(&mut self, a: H256) { self.state_root = a; self.note_dirty(); }
+	/// Set the transactions root field of the header.
+	pub fn set_transactions_root(&mut self, a: H256) { self.transactions_root = a; self.note_dirty() }
+	/// Set the receipts root field of the header.
+	pub fn set_receipts_root(&mut self, a: H256) { self.receipts_root = a; self.note_dirty() }
+	/// Set the log bloom field of the header.
+	pub fn set_log_bloom(&mut self, a: LogBloom) { self.log_bloom = a; self.note_dirty() }
 	/// Set the timestamp field of the header.
 	pub fn set_timestamp(&mut self, a: u64) { self.timestamp = a; self.note_dirty(); }
 	/// Set the timestamp field of the header to the current time.
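Aside: every setter added above follows the same pattern as the existing ones — write the field, then call note_dirty() so any memoized encoding or hash is recomputed on next use. A stripped-down sketch of that pattern (the cached field here is hypothetical, not the real Header layout):

struct Header {
	timestamp: u64,
	cached_hash: Option<[u8; 32]>, // stands in for the real memoized hash
}

impl Header {
	fn note_dirty(&mut self) {
		// Any field change invalidates the memoized hash.
		self.cached_hash = None;
	}

	fn set_timestamp(&mut self, a: u64) {
		self.timestamp = a;
		self.note_dirty();
	}
}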
@@ -118,8 +118,9 @@ pub mod pod_state;
 pub mod engine;
 pub mod migrations;
 pub mod miner;
-#[macro_use] pub mod evm;
+pub mod snapshot;
 pub mod action_params;
+#[macro_use] pub mod evm;
 
 mod blooms;
 mod db;
@@ -146,5 +147,4 @@ mod tests;
 mod json_tests;
 
 pub use types::*;
-pub use evm::get_info;
 pub use executive::contract_address;
@@ -62,7 +62,8 @@ fn attempt_migrate(mut key_h: H256, val: &[u8]) -> Option<H256> {
 }
 
 /// Version for ArchiveDB.
-pub struct ArchiveV7;
+#[derive(Default)]
+pub struct ArchiveV7(usize);
 
 impl SimpleMigration for ArchiveV7 {
 	fn version(&self) -> u32 {
@@ -70,6 +71,12 @@ impl SimpleMigration for ArchiveV7 {
 	}
 
 	fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
+		self.0 += 1;
+		if self.0 == 100_000 {
+			self.0 = 0;
+			flush!(".");
+		}
+
 		if key.len() != 32 {
 			// metadata key, ignore.
 			return Some((key, value));
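Aside: this migration (and the OverlayRecent one further down) gains the same lightweight progress indicator — count records and print a dot every 100,000, flushing stdout so the dot shows up immediately. A plain-std sketch of the same idea:

use std::io::{self, Write};

fn migrate_all<I: Iterator<Item = Vec<u8>>>(records: I) {
	let mut count = 0usize;
	for _record in records {
		count += 1;
		if count == 100_000 {
			count = 0;
			print!(".");
			io::stdout().flush().ok(); // same effect as the flush!(".") macro above
		}
		// ... migrate the record here ...
	}
}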
@@ -91,7 +98,7 @@ const V7_VERSION_KEY: &'static [u8] = &[ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0,
 const DB_VERSION: u32 = 0x203;
 const PADDING : [u8; 10] = [0u8; 10];
 
-/// Version for OverlayRecent database.
+/// Version for `OverlayRecent` database.
 /// more involved than the archive version because of journaling.
 #[derive(Default)]
 pub struct OverlayRecentV7 {
@@ -228,7 +235,14 @@ impl Migration for OverlayRecentV7 {
 			_ => return Err(Error::MigrationImpossible), // missing or wrong version
 		}
 
+		let mut count = 0;
 		for (key, value) in source.iter() {
+			count += 1;
+			if count == 100_000 {
+				count = 0;
+				flush!(".");
+			}
+
 			let mut key = key.into_vec();
 			if key.len() == 32 {
 				let key_h = H256::from_slice(&key[..]);
@@ -244,4 +258,4 @@ impl Migration for OverlayRecentV7 {
 		try!(self.walk_journal(source));
 		self.migrate_journal(source, batch, dest)
 	}
 }
@@ -665,7 +665,7 @@ impl MinerService for Miner {
 		};
 		match (&self.options.pending_set, sealing_set) {
 			(&PendingSet::AlwaysQueue, _) | (&PendingSet::SealingOrElseQueue, None) => queue.top_transactions(),
-			(_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().clone()),
+			(_, sealing) => sealing.map_or_else(Vec::new, |s| s.transactions().to_owned()),
 		}
 	}
 
@@ -702,7 +702,7 @@ impl MinerService for Miner {
 					.iter()
 					.map(|t| t.hash());
 
-				let receipts = pending.receipts().clone().into_iter();
+				let receipts = pending.receipts().iter().cloned();
 
 				hashes.zip(receipts).collect()
 			},
@@ -20,67 +20,50 @@ use util::*;
 use util::panics::*;
 use spec::Spec;
 use error::*;
-use client::{Client, ClientConfig};
+use client::{Client, ClientConfig, ChainNotify};
 use miner::Miner;
 
 /// Message type for external and internal events
 #[derive(Clone)]
-pub enum SyncMessage {
-	/// New block has been imported into the blockchain
-	NewChainBlocks {
-		/// Hashes of blocks imported to blockchain
-		imported: Vec<H256>,
-		/// Hashes of blocks not imported to blockchain (because were invalid)
-		invalid: Vec<H256>,
-		/// Hashes of blocks that were removed from canonical chain
-		retracted: Vec<H256>,
-		/// Hashes of blocks that are now included in cannonical chain
-		enacted: Vec<H256>,
-		/// Hashes of blocks that are sealed by this node
-		sealed: Vec<H256>,
-	},
+pub enum ClientIoMessage {
 	/// Best Block Hash in chain has been changed
 	NewChainHead,
 	/// A block is ready
 	BlockVerified,
 	/// New transaction RLPs are ready to be imported
 	NewTransactions(Vec<Bytes>),
-	/// Start network command.
-	StartNetwork,
-	/// Stop network command.
-	StopNetwork,
 }
 
-/// IO Message type used for Network service
-pub type NetSyncMessage = NetworkIoMessage<SyncMessage>;
-
 /// Client service setup. Creates and registers client and network services with the IO subsystem.
 pub struct ClientService {
-	net_service: Arc<NetworkService<SyncMessage>>,
+	io_service: Arc<IoService<ClientIoMessage>>,
 	client: Arc<Client>,
 	panic_handler: Arc<PanicHandler>
 }
 
 impl ClientService {
 	/// Start the service in a separate thread.
-	pub fn start(config: ClientConfig, spec: Spec, net_config: NetworkConfiguration, db_path: &Path, miner: Arc<Miner>, enable_network: bool) -> Result<ClientService, Error> {
+	pub fn start(
+		config: ClientConfig,
+		spec: Spec,
+		db_path: &Path,
+		miner: Arc<Miner>,
+		) -> Result<ClientService, Error>
+	{
 		let panic_handler = PanicHandler::new_in_arc();
-		let net_service = try!(NetworkService::new(net_config));
-		panic_handler.forward_from(&net_service);
-		if enable_network {
-			try!(net_service.start());
-		}
+		let io_service = try!(IoService::<ClientIoMessage>::start());
+		panic_handler.forward_from(&io_service);
 
 		info!("Configured for {} using {} engine", spec.name.clone().apply(Colour::White.bold()), spec.engine.name().apply(Colour::Yellow.bold()));
-		let client = try!(Client::new(config, spec, db_path, miner, net_service.io().channel()));
+		let client = try!(Client::new(config, spec, db_path, miner, io_service.channel()));
 		panic_handler.forward_from(client.deref());
 		let client_io = Arc::new(ClientIoHandler {
 			client: client.clone()
 		});
-		try!(net_service.io().register_handler(client_io));
+		try!(io_service.register_handler(client_io));
 
 		Ok(ClientService {
-			net_service: Arc::new(net_service),
+			io_service: Arc::new(io_service),
 			client: client,
 			panic_handler: panic_handler,
 		})
@@ -92,8 +75,8 @@ impl ClientService {
 	}
 
 	/// Get general IO interface
-	pub fn register_io_handler(&self, handler: Arc<IoHandler<NetSyncMessage> + Send>) -> Result<(), IoError> {
-		self.net_service.io().register_handler(handler)
+	pub fn register_io_handler(&self, handler: Arc<IoHandler<ClientIoMessage> + Send>) -> Result<(), IoError> {
+		self.io_service.register_handler(handler)
 	}
 
 	/// Get client interface
@@ -102,8 +85,13 @@ impl ClientService {
 	}
 
 	/// Get network service component
-	pub fn network(&mut self) -> Arc<NetworkService<SyncMessage>> {
-		self.net_service.clone()
+	pub fn io(&self) -> Arc<IoService<ClientIoMessage>> {
+		self.io_service.clone()
+	}
+
+	/// Set the actor to be notified on certain chain events
+	pub fn set_notify(&self, notify: &Arc<ChainNotify>) {
+		self.client.set_notify(notify);
 	}
 }
 
@@ -121,26 +109,22 @@ struct ClientIoHandler {
 const CLIENT_TICK_TIMER: TimerToken = 0;
 const CLIENT_TICK_MS: u64 = 5000;
 
-impl IoHandler<NetSyncMessage> for ClientIoHandler {
-	fn initialize(&self, io: &IoContext<NetSyncMessage>) {
+impl IoHandler<ClientIoMessage> for ClientIoHandler {
+	fn initialize(&self, io: &IoContext<ClientIoMessage>) {
 		io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK_MS).expect("Error registering client timer");
 	}
 
-	fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) {
+	fn timeout(&self, _io: &IoContext<ClientIoMessage>, timer: TimerToken) {
 		if timer == CLIENT_TICK_TIMER {
 			self.client.tick();
 		}
 	}
 
 	#[cfg_attr(feature="dev", allow(single_match))]
-	fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
+	fn message(&self, io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) {
 		match *net_message {
-			UserMessage(ref message) => match *message {
-				SyncMessage::BlockVerified => { self.client.import_verified_blocks(&io.channel()); }
-				SyncMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(&transactions); }
-				_ => {} // ignore other messages
-			},
-			NetworkIoMessage::NetworkStarted(ref url) => { self.client.network_started(url); }
+			ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(&io.channel()); }
+			ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(&transactions); }
 			_ => {} // ignore other messages
 		}
 	}
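Aside: with the UserMessage wrapper and the network-only variants gone, the handler now matches ClientIoMessage variants directly. A minimal sketch of that dispatch shape (placeholder payload types, not the real client API):

enum ClientIoMessage {
	BlockVerified,
	NewTransactions(Vec<Vec<u8>>), // raw transaction RLP blobs
	NewChainHead,
}

fn handle(msg: &ClientIoMessage) {
	match *msg {
		ClientIoMessage::BlockVerified => { /* import verified blocks */ }
		ClientIoMessage::NewTransactions(ref txs) => { let _ = txs.len(); /* import queued transactions */ }
		_ => {} // ignore other messages
	}
}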
@@ -150,7 +134,6 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
 mod tests {
 	use super::*;
 	use tests::helpers::*;
-	use util::network::*;
 	use devtools::*;
 	use client::ClientConfig;
 	use std::sync::Arc;
@@ -162,10 +145,8 @@ mod tests {
 		let service = ClientService::start(
 			ClientConfig::default(),
 			get_test_spec(),
-			NetworkConfiguration::new_local(),
 			&temp_path.as_path(),
 			Arc::new(Miner::with_spec(get_test_spec())),
-			false
 		);
 		assert!(service.is_ok());
 	}
ethcore/src/snapshot/account.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Account state encoding and decoding

use account_db::{AccountDB, AccountDBMut};
use error::Error;

use util::{Bytes, HashDB, SHA3_EMPTY, TrieDB};
use util::hash::{FixedHash, H256};
use util::numbers::U256;
use util::rlp::{DecoderError, Rlp, RlpStream, Stream, UntrustedRlp, View};

// An alternate account structure from ::account::Account.
#[derive(PartialEq, Clone, Debug)]
pub struct Account {
	nonce: U256,
	balance: U256,
	storage_root: H256,
	code_hash: H256,
}

impl Account {
	// decode the account from rlp.
	pub fn from_thin_rlp(rlp: &[u8]) -> Self {
		let r: Rlp = Rlp::new(rlp);

		Account {
			nonce: r.val_at(0),
			balance: r.val_at(1),
			storage_root: r.val_at(2),
			code_hash: r.val_at(3),
		}
	}

	// encode the account to a standard rlp.
	pub fn to_thin_rlp(&self) -> Bytes {
		let mut stream = RlpStream::new_list(4);
		stream
			.append(&self.nonce)
			.append(&self.balance)
			.append(&self.storage_root)
			.append(&self.code_hash);

		stream.out()
	}

	// walk the account's storage trie, returning an RLP item containing the
	// account properties and the storage.
	pub fn to_fat_rlp(&self, acct_db: &AccountDB) -> Result<Bytes, Error> {
		let db = try!(TrieDB::new(acct_db, &self.storage_root));

		let mut pairs = Vec::new();

		for (k, v) in db.iter() {
			pairs.push((k, v));
		}

		let mut stream = RlpStream::new_list(pairs.len());

		for (k, v) in pairs {
			stream.begin_list(2).append(&k).append(&v);
		}

		let pairs_rlp = stream.out();

		let mut account_stream = RlpStream::new_list(5);
		account_stream.append(&self.nonce)
			.append(&self.balance);

		// [has_code, code_hash].
		if self.code_hash == SHA3_EMPTY {
			account_stream.append(&false).append_empty_data();
		} else {
			match acct_db.get(&self.code_hash) {
				Some(c) => {
					account_stream.append(&true).append(&c);
				}
				None => {
					warn!("code lookup failed during snapshot");
					account_stream.append(&false).append_empty_data();
				}
			}
		}

		account_stream.append_raw(&pairs_rlp, 1);

		Ok(account_stream.out())
	}

	// decode a fat rlp, and rebuild the storage trie as we go.
	pub fn from_fat_rlp(acct_db: &mut AccountDBMut, rlp: UntrustedRlp) -> Result<Self, DecoderError> {
		use util::{TrieDBMut, TrieMut};

		let nonce = try!(rlp.val_at(0));
		let balance = try!(rlp.val_at(1));
		let code_hash = if try!(rlp.val_at(2)) {
			let code: Bytes = try!(rlp.val_at(3));
			acct_db.insert(&code)
		} else {
			SHA3_EMPTY
		};

		let mut storage_root = H256::zero();

		{
			let mut storage_trie = TrieDBMut::new(acct_db, &mut storage_root);
			let pairs = try!(rlp.at(4));
			for pair_rlp in pairs.iter() {
				let k: Bytes = try!(pair_rlp.val_at(0));
				let v: Bytes = try!(pair_rlp.val_at(1));

				storage_trie.insert(&k, &v);
			}
		}
		Ok(Account {
			nonce: nonce,
			balance: balance,
			storage_root: storage_root,
			code_hash: code_hash,
		})
	}
}

#[cfg(test)]
mod tests {
	use account_db::{AccountDB, AccountDBMut};
	use tests::helpers::get_temp_journal_db;

	use util::{SHA3_NULL_RLP, SHA3_EMPTY};
	use util::hash::{Address, FixedHash, H256};
	use util::rlp::{UntrustedRlp, View};
	use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode};

	use super::Account;

	fn fill_storage(mut db: AccountDBMut) -> H256 {
		let map = StandardMap {
			alphabet: Alphabet::All,
			min_key: 6,
			journal_key: 6,
			value_mode: ValueMode::Random,
			count: 100
		};

		let mut root = H256::new();
		{
			let mut trie = SecTrieDBMut::new(&mut db, &mut root);
			for (k, v) in map.make() {
				trie.insert(&k, &v);
			}
		}
		root
	}

	#[test]
	fn encoding_basic() {
		let mut db = get_temp_journal_db();
		let mut db = &mut **db;
		let addr = Address::random();

		let account = Account {
			nonce: 50.into(),
			balance: 123456789.into(),
			storage_root: SHA3_NULL_RLP,
			code_hash: SHA3_EMPTY,
		};

		let thin_rlp = account.to_thin_rlp();
		assert_eq!(Account::from_thin_rlp(&thin_rlp), account);

		let fat_rlp = account.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr)).unwrap();
		let fat_rlp = UntrustedRlp::new(&fat_rlp);
		assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp).unwrap(), account);
	}

	#[test]
	fn encoding_storage() {
		let mut db = get_temp_journal_db();
		let mut db = &mut **db;
		let addr = Address::random();

		let root = fill_storage(AccountDBMut::new(db.as_hashdb_mut(), &addr));
		let account = Account {
			nonce: 25.into(),
			balance: 987654321.into(),
			storage_root: root,
			code_hash: SHA3_EMPTY,
		};

		let thin_rlp = account.to_thin_rlp();
		assert_eq!(Account::from_thin_rlp(&thin_rlp), account);

		let fat_rlp = account.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr)).unwrap();
		let fat_rlp = UntrustedRlp::new(&fat_rlp);
		assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp).unwrap(), account);
	}
}
ethcore/src/snapshot/block.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Block RLP compression.

// TODO [rob] remove when BlockRebuilder done.
#![allow(dead_code)]

use block::Block;
use header::Header;

use views::BlockView;
use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View};
use util::{Bytes, Hashable, H256};

const HEADER_FIELDS: usize = 10;
const BLOCK_FIELDS: usize = 2;

pub struct AbridgedBlock {
	rlp: Bytes,
}

impl AbridgedBlock {
	/// Create from a vector of bytes. Does no verification.
	pub fn from_raw(rlp: Bytes) -> Self {
		AbridgedBlock {
			rlp: rlp,
		}
	}

	/// Return the inner bytes.
	pub fn into_inner(self) -> Bytes {
		self.rlp
	}

	/// Given a full block view, trim out the parent hash and block number,
	/// producing new rlp.
	pub fn from_block_view(block_view: &BlockView) -> Self {
		let header = block_view.header_view();

		let seal_fields = header.seal();

		// 10 header fields, unknown amount of seal fields, and 2 block fields.
		let mut stream = RlpStream::new_list(
			HEADER_FIELDS +
			seal_fields.len() +
			BLOCK_FIELDS
		);

		// write header values.
		stream
			.append(&header.author())
			.append(&header.state_root())
			.append(&header.transactions_root())
			.append(&header.receipts_root())
			.append(&header.log_bloom())
			.append(&header.difficulty())
			.append(&header.gas_limit())
			.append(&header.gas_used())
			.append(&header.timestamp())
			.append(&header.extra_data());

		// write block values.
		stream.append(&block_view.transactions()).append(&block_view.uncles());

		// write seal fields.
		for field in seal_fields {
			stream.append_raw(&field, 1);
		}

		AbridgedBlock {
			rlp: stream.out(),
		}
	}

	/// Flesh out an abridged block view with the provided parent hash and block number.
	///
	/// Will fail if contains invalid rlp.
	pub fn to_block(&self, parent_hash: H256, number: u64) -> Result<Block, DecoderError> {
		let rlp = UntrustedRlp::new(&self.rlp);

		let mut header = Header {
			parent_hash: parent_hash,
			author: try!(rlp.val_at(0)),
			state_root: try!(rlp.val_at(1)),
			transactions_root: try!(rlp.val_at(2)),
			receipts_root: try!(rlp.val_at(3)),
			log_bloom: try!(rlp.val_at(4)),
			difficulty: try!(rlp.val_at(5)),
			number: number,
			gas_limit: try!(rlp.val_at(6)),
			gas_used: try!(rlp.val_at(7)),
			timestamp: try!(rlp.val_at(8)),
			extra_data: try!(rlp.val_at(9)),
			..Default::default()
		};
		let transactions = try!(rlp.val_at(10));
		let uncles: Vec<Header> = try!(rlp.val_at(11));

		// iterator-based approach is cleaner but doesn't work w/ try.
		let seal = {
			let mut seal = Vec::new();

			for i in 12..rlp.item_count() {
				seal.push(try!(rlp.val_at(i)));
			}

			seal
		};

		header.set_seal(seal);

		let uncle_bytes = uncles.iter()
			.fold(RlpStream::new_list(uncles.len()), |mut s, u| {
				s.append_raw(&u.rlp(::basic_types::Seal::With), 1);
				s
			}).out();
		header.uncles_hash = uncle_bytes.sha3();

		Ok(Block {
			header: header,
			transactions: transactions,
			uncles: uncles,
		})
	}
}

#[cfg(test)]
mod tests {
	use views::BlockView;
	use block::Block;
	use super::AbridgedBlock;
	use types::transaction::{Action, Transaction};

	use util::numbers::U256;
	use util::hash::{Address, H256, FixedHash};
	use util::{Bytes, RlpStream, Stream};

	fn encode_block(b: &Block) -> Bytes {
		let mut s = RlpStream::new_list(3);

		b.header.stream_rlp(&mut s, ::basic_types::Seal::With);
		s.append(&b.transactions);
		s.append(&b.uncles);

		s.out()
	}

	#[test]
	fn empty_block_abridging() {
		let b = Block::default();
		let encoded = encode_block(&b);

		let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded));
		assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b);
	}

	#[test]
	#[should_panic]
	fn wrong_number() {
		let b = Block::default();
		let encoded = encode_block(&b);

		let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded));
		assert_eq!(abridged.to_block(H256::new(), 2).unwrap(), b);
	}

	#[test]
	fn with_transactions() {
		let mut b = Block::default();

		let t1 = Transaction {
			action: Action::Create,
			nonce: U256::from(42),
			gas_price: U256::from(3000),
			gas: U256::from(50_000),
			value: U256::from(1),
			data: b"Hello!".to_vec()
		}.fake_sign(Address::from(0x69));

		let t2 = Transaction {
			action: Action::Create,
			nonce: U256::from(88),
			gas_price: U256::from(12345),
			gas: U256::from(300000),
			value: U256::from(1000000000),
			data: "Eep!".into(),
		}.fake_sign(Address::from(0x55));

		b.transactions.push(t1);
		b.transactions.push(t2);

		let encoded = encode_block(&b);

		let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded[..]));
		assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b);
	}
}
417
ethcore/src/snapshot/mod.rs
Normal file
417
ethcore/src/snapshot/mod.rs
Normal file
@ -0,0 +1,417 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Snapshot creation helpers.

use std::collections::VecDeque;
use std::fs::{create_dir_all, File};
use std::io::Write;
use std::path::{Path, PathBuf};

use account_db::{AccountDB, AccountDBMut};
use client::BlockChainClient;
use error::Error;
use ids::BlockID;
use views::{BlockView, HeaderView};

use util::{Bytes, Hashable, HashDB, JournalDB, snappy, TrieDB, TrieDBMut, TrieMut};
use util::hash::{FixedHash, H256};
use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View};

use self::account::Account;
use self::block::AbridgedBlock;

use crossbeam::{scope, ScopedJoinHandle};

mod account;
mod block;

// Try to have chunks be around 16MB (before compression)
const PREFERRED_CHUNK_SIZE: usize = 16 * 1024 * 1024;
/// Take a snapshot using the given client and database, writing into `path`.
pub fn take_snapshot(client: &BlockChainClient, mut path: PathBuf, state_db: &HashDB) -> Result<(), Error> {
	let chain_info = client.chain_info();

	let genesis_hash = chain_info.genesis_hash;
	let best_header_raw = client.best_block_header();
	let best_header = HeaderView::new(&best_header_raw);
	let state_root = best_header.state_root();

	trace!(target: "snapshot", "Taking snapshot starting at block {}", best_header.number());

	let _ = create_dir_all(&path);

	let state_hashes = try!(chunk_state(state_db, &state_root, &path));
	let block_hashes = try!(chunk_blocks(client, best_header.hash(), genesis_hash, &path));

	trace!(target: "snapshot", "produced {} state chunks and {} block chunks.", state_hashes.len(), block_hashes.len());

	let manifest_data = ManifestData {
		state_hashes: state_hashes,
		block_hashes: block_hashes,
		state_root: state_root,
		block_number: chain_info.best_block_number,
		block_hash: chain_info.best_block_hash,
	};

	path.push("MANIFEST");

	let mut manifest_file = try!(File::create(&path));

	try!(manifest_file.write_all(&manifest_data.to_rlp()));

	Ok(())
}
// shared portion of write_chunk
// returns either a (hash, compressed_size) pair or an io error.
fn write_chunk(raw_data: &[u8], compression_buffer: &mut Vec<u8>, path: &Path) -> Result<(H256, usize), Error> {
	let compressed_size = snappy::compress_into(raw_data, compression_buffer);
	let compressed = &compression_buffer[..compressed_size];
	let hash = compressed.sha3();

	let mut file_path = path.to_owned();
	file_path.push(hash.hex());

	let mut file = try!(File::create(file_path));
	try!(file.write_all(compressed));

	Ok((hash, compressed_size))
}
/// Used to build block chunks.
struct BlockChunker<'a> {
	client: &'a BlockChainClient,
	// block, receipt rlp pairs.
	rlps: VecDeque<Bytes>,
	current_hash: H256,
	hashes: Vec<H256>,
	snappy_buffer: Vec<u8>,
}

impl<'a> BlockChunker<'a> {
	// Repeatedly fill the buffers and writes out chunks, moving backwards from starting block hash.
	// Loops until we reach the genesis, and writes out the remainder.
	fn chunk_all(&mut self, genesis_hash: H256, path: &Path) -> Result<(), Error> {
		let mut loaded_size = 0;

		while self.current_hash != genesis_hash {
			let block = self.client.block(BlockID::Hash(self.current_hash))
				.expect("started from the head of chain and walking backwards; client stores full chain; qed");
			let view = BlockView::new(&block);
			let abridged_rlp = AbridgedBlock::from_block_view(&view).into_inner();

			let receipts = self.client.block_receipts(&self.current_hash)
				.expect("started from head of chain and walking backwards; client stores full chain; qed");

			let pair = {
				let mut pair_stream = RlpStream::new_list(2);
				pair_stream.append(&abridged_rlp).append(&receipts);
				pair_stream.out()
			};

			let new_loaded_size = loaded_size + pair.len();

			// cut off the chunk if too large
			if new_loaded_size > PREFERRED_CHUNK_SIZE {
				let header = view.header_view();
				try!(self.write_chunk(header.parent_hash(), header.number(), path));
				loaded_size = pair.len();
			} else {
				loaded_size = new_loaded_size;
			}

			self.rlps.push_front(pair);
			self.current_hash = view.header_view().parent_hash();
		}

		if loaded_size != 0 {
			// we don't store the genesis block, so once we get to this point,
			// the "first" block will be number 1.
			try!(self.write_chunk(genesis_hash, 1, path));
		}

		Ok(())
	}

	// write out the data in the buffers to a chunk on disk
	fn write_chunk(&mut self, parent_hash: H256, number: u64, path: &Path) -> Result<(), Error> {
		trace!(target: "snapshot", "prepared block chunk with {} blocks", self.rlps.len());
		let mut rlp_stream = RlpStream::new_list(self.rlps.len() + 2);
		rlp_stream.append(&parent_hash).append(&number);
		for pair in self.rlps.drain(..) {
			rlp_stream.append_raw(&pair, 1);
		}

		let raw_data = rlp_stream.out();
		let (hash, size) = try!(write_chunk(&raw_data, &mut self.snappy_buffer, path));
		trace!(target: "snapshot", "wrote block chunk. hash: {}, size: {}, uncompressed size: {}", hash.hex(), size, raw_data.len());

		self.hashes.push(hash);
		Ok(())
	}
}
/// Create and write out all block chunks to disk, returning a vector of all
/// the hashes of block chunks created.
///
/// The path parameter is the directory to store the block chunks in.
/// This function assumes the directory exists already.
pub fn chunk_blocks(client: &BlockChainClient, best_block_hash: H256, genesis_hash: H256, path: &Path) -> Result<Vec<H256>, Error> {
	let mut chunker = BlockChunker {
		client: client,
		rlps: VecDeque::new(),
		current_hash: best_block_hash,
		hashes: Vec::new(),
		snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
	};

	try!(chunker.chunk_all(genesis_hash, path));

	Ok(chunker.hashes)
}
/// State trie chunker.
struct StateChunker<'a> {
	hashes: Vec<H256>,
	rlps: Vec<Bytes>,
	cur_size: usize,
	snapshot_path: &'a Path,
	snappy_buffer: Vec<u8>,
}

impl<'a> StateChunker<'a> {
	// Push a key, value pair to be encoded.
	//
	// If the buffer is greater than the desired chunk size,
	// this will write out the data to disk.
	fn push(&mut self, account_hash: Bytes, data: Bytes) -> Result<(), Error> {
		let pair = {
			let mut stream = RlpStream::new_list(2);
			stream.append(&account_hash).append_raw(&data, 1);
			stream.out()
		};

		if self.cur_size + pair.len() >= PREFERRED_CHUNK_SIZE {
			try!(self.write_chunk());
		}

		self.cur_size += pair.len();
		self.rlps.push(pair);

		Ok(())
	}

	// Write out the buffer to disk, pushing the created chunk's hash to
	// the list.
	fn write_chunk(&mut self) -> Result<(), Error> {
		let mut stream = RlpStream::new_list(self.rlps.len());
		for rlp in self.rlps.drain(..) {
			stream.append_raw(&rlp, 1);
		}

		let raw_data = stream.out();
		let (hash, compressed_size) = try!(write_chunk(&raw_data, &mut self.snappy_buffer, self.snapshot_path));
		trace!(target: "snapshot", "wrote state chunk. size: {}, uncompressed size: {}", compressed_size, raw_data.len());

		self.hashes.push(hash);
		self.cur_size = 0;

		Ok(())
	}
}
/// Walk the given state database starting from the given root,
/// creating chunks and writing them out.
///
/// Returns a list of hashes of chunks created, or any error it may
/// have encountered.
pub fn chunk_state(db: &HashDB, root: &H256, path: &Path) -> Result<Vec<H256>, Error> {
	let account_view = try!(TrieDB::new(db, &root));

	let mut chunker = StateChunker {
		hashes: Vec::new(),
		rlps: Vec::new(),
		cur_size: 0,
		snapshot_path: path,
		snappy_buffer: vec![0; snappy::max_compressed_len(PREFERRED_CHUNK_SIZE)],
	};

	trace!(target: "snapshot", "beginning state chunking");

	// account_key here is the address' hash.
	for (account_key, account_data) in account_view.iter() {
		let account = Account::from_thin_rlp(account_data);
		let account_key_hash = H256::from_slice(&account_key);

		let account_db = AccountDB::from_hash(db, account_key_hash);

		let fat_rlp = try!(account.to_fat_rlp(&account_db));
		try!(chunker.push(account_key, fat_rlp));
	}

	if chunker.cur_size != 0 {
		try!(chunker.write_chunk());
	}

	Ok(chunker.hashes)
}
/// Manifest data.
pub struct ManifestData {
	/// List of state chunk hashes.
	pub state_hashes: Vec<H256>,
	/// List of block chunk hashes.
	pub block_hashes: Vec<H256>,
	/// The final, expected state root.
	pub state_root: H256,
	/// Block number this snapshot was taken at.
	pub block_number: u64,
	/// Block hash this snapshot was taken at.
	pub block_hash: H256,
}
impl ManifestData {
	/// Encode the manifest data to rlp.
	pub fn to_rlp(self) -> Bytes {
		let mut stream = RlpStream::new_list(5);
		stream.append(&self.state_hashes);
		stream.append(&self.block_hashes);
		stream.append(&self.state_root);
		stream.append(&self.block_number);
		stream.append(&self.block_hash);

		stream.out()
	}

	/// Try to restore manifest data from raw bytes, interpreted as RLP.
	pub fn from_rlp(raw: &[u8]) -> Result<Self, DecoderError> {
		let decoder = UntrustedRlp::new(raw);

		let state_hashes: Vec<H256> = try!(decoder.val_at(0));
		let block_hashes: Vec<H256> = try!(decoder.val_at(1));
		let state_root: H256 = try!(decoder.val_at(2));
		let block_number: u64 = try!(decoder.val_at(3));
		let block_hash: H256 = try!(decoder.val_at(4));

		Ok(ManifestData {
			state_hashes: state_hashes,
			block_hashes: block_hashes,
			state_root: state_root,
			block_number: block_number,
			block_hash: block_hash,
		})
	}
}
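The `to_rlp`/`from_rlp` pair above is symmetric. The snippet below is an illustrative round-trip check, not part of this commit; it assumes it sits inside this module (so the `H256`/`FixedHash` imports above are in scope), and the field values are arbitrary.

	#[test]
	fn manifest_rlp_roundtrip() {
		// Arbitrary values; only the 5-item list structure matters here.
		let manifest = ManifestData {
			state_hashes: vec![H256::new()],
			block_hashes: vec![H256::new(), H256::new()],
			state_root: H256::zero(),
			block_number: 1_000_000,
			block_hash: H256::zero(),
		};

		// to_rlp consumes the manifest and yields the bytes written to the MANIFEST file.
		let raw = manifest.to_rlp();

		// from_rlp must restore the same shape and values.
		let restored = ManifestData::from_rlp(&raw).expect("freshly encoded manifest decodes");
		assert_eq!(restored.state_hashes.len(), 1);
		assert_eq!(restored.block_hashes.len(), 2);
		assert_eq!(restored.block_number, 1_000_000);
		assert_eq!(restored.block_hash, H256::zero());
	}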
/// Used to rebuild the state trie piece by piece.
pub struct StateRebuilder {
	db: Box<JournalDB>,
	state_root: H256,
	snappy_buffer: Vec<u8>
}
impl StateRebuilder {
	/// Create a new state rebuilder to write into the given backing DB.
	pub fn new(db: Box<JournalDB>) -> Self {
		StateRebuilder {
			db: db,
			state_root: H256::zero(),
			snappy_buffer: Vec::new(),
		}
	}

	/// Feed a compressed state chunk into the rebuilder.
	pub fn feed(&mut self, compressed: &[u8]) -> Result<(), Error> {
		let len = try!(snappy::decompress_into(compressed, &mut self.snappy_buffer));
		let rlp = UntrustedRlp::new(&self.snappy_buffer[..len]);
		let account_fat_rlps: Vec<_> = rlp.iter().map(|r| r.as_raw()).collect();
		let mut pairs = Vec::with_capacity(rlp.item_count());

		// initialize the pairs vector with empty values so we have slots to write into.
		for _ in 0..rlp.item_count() {
			pairs.push((H256::new(), Vec::new()));
		}

		let chunk_size = account_fat_rlps.len() / ::num_cpus::get();

		// build account tries in parallel.
		try!(scope(|scope| {
			let mut handles = Vec::new();
			for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) {
				let mut db = self.db.boxed_clone();
				let handle: ScopedJoinHandle<Result<(), Error>> = scope.spawn(move || {
					try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk));

					// commit the db changes we made in this thread.
					try!(db.commit(0, &H256::zero(), None));

					Ok(())
				});

				handles.push(handle);
			}

			// see if we got any errors.
			for handle in handles {
				try!(handle.join());
			}

			Ok::<_, Error>(())
		}));

		// batch trie writes
		{
			let mut account_trie = if self.state_root != H256::zero() {
				try!(TrieDBMut::from_existing(self.db.as_hashdb_mut(), &mut self.state_root))
			} else {
				TrieDBMut::new(self.db.as_hashdb_mut(), &mut self.state_root)
			};

			for (hash, thin_rlp) in pairs {
				account_trie.insert(&hash, &thin_rlp);
			}
		}

		try!(self.db.commit(0, &H256::zero(), None));
		Ok(())
	}

	/// Get the state root of the rebuilder.
	pub fn state_root(&self) -> H256 { self.state_root }
}
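In `feed` above, account chunks are scattered across worker threads with `crossbeam::scope` and joined before the trie writes are batched. The following is a standalone sketch of that chunk/spawn/join shape, not code from this commit: it swaps in the standard library's scoped threads for crossbeam and uses a toy per-item job in place of `rebuild_account_trie`.

	use std::thread;

	// Split the work into roughly equal chunks, give each chunk to a scoped thread,
	// and join them all before touching the combined output.
	fn parallel_fill(input: &[u64], output: &mut [u64]) {
		let workers = 4;
		let chunk_size = (input.len() + workers - 1) / workers;

		thread::scope(|scope| {
			for (in_chunk, out_chunk) in input.chunks(chunk_size).zip(output.chunks_mut(chunk_size)) {
				scope.spawn(move || {
					for (src, dst) in in_chunk.iter().zip(out_chunk.iter_mut()) {
						*dst = *src * 2; // stand-in for rebuilding one account
					}
				});
			}
		}); // all scoped threads are joined here, so `output` is fully written

	}

	fn main() {
		let input: Vec<u64> = (0..10).collect();
		let mut output = vec![0u64; input.len()];
		parallel_fill(&input, &mut output);
		assert_eq!(output[7], 14);
	}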
fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mut [(H256, Bytes)]) -> Result<(), Error> {
	for (account_pair, out) in account_chunk.into_iter().zip(out_chunk) {
		let account_rlp = UntrustedRlp::new(account_pair);

		let hash: H256 = try!(account_rlp.val_at(0));
		let fat_rlp = try!(account_rlp.at(1));

		let thin_rlp = {
			let mut acct_db = AccountDBMut::from_hash(db.as_hashdb_mut(), hash);

			// fill out the storage trie and code while decoding.
			let acc = try!(Account::from_fat_rlp(&mut acct_db, fat_rlp));

			acc.to_thin_rlp()
		};

		*out = (hash, thin_rlp);
	}
	Ok(())
}
@ -143,7 +143,7 @@ impl Spec {
 	}

 	/// Get the known knodes of the network in enode format.
-	pub fn nodes(&self) -> &Vec<String> { &self.nodes }
+	pub fn nodes(&self) -> &[String] { &self.nodes }

 	/// Get the configured Network ID.
 	pub fn network_id(&self) -> U256 { self.params.network_id }
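The `nodes()` change above returns a slice instead of `&Vec<String>`, so callers borrow without depending on the concrete container and copy with `to_owned()` only when they need ownership (the `Configuration` hunk later in this diff switches the caller from `.clone()` to `.to_owned()` for exactly this reason). A small self-contained sketch of the same pattern, using a hypothetical `NodeList` rather than the real `Spec`:

	struct NodeList {
		nodes: Vec<String>,
	}

	impl NodeList {
		// Returning a slice hides the backing container; &Vec<String> deref-coerces to &[String].
		fn nodes(&self) -> &[String] {
			&self.nodes
		}
	}

	fn main() {
		let list = NodeList { nodes: vec!["enode://abc@127.0.0.1:30303".to_string()] };

		// Borrow for iteration...
		for n in list.nodes() {
			println!("{}", n);
		}

		// ...or copy when ownership is needed, as `spec.nodes().to_owned()` does in this diff.
		let owned: Vec<String> = list.nodes().to_owned();
		assert_eq!(owned.len(), 1);
	}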
@ -4,6 +4,14 @@ description = "Parity's EVM implementation"
 version = "0.1.0"
 authors = ["Ethcore <admin@ethcore.io>"]
 
+[lib]
+name = "evm"
+path = "./src/main.rs"
+
+[[bin]]
+name = "evm"
+path = "./src/main.rs"
+
 [dependencies]
 rustc-serialize = "0.3"
 docopt = { version = "0.6" }
23
evmbin/bench.sh
Executable file
@ -0,0 +1,23 @@
#!/bin/bash

set -e

cargo build --release

# LOOP TEST
CODE1=606060405260005b620f42408112156019575b6001016007565b600081905550600680602b6000396000f3606060405200
ethvm --code $CODE1
echo "^^^^ ethvm"
./target/release/evm stats --code $CODE1 --gas 4402000
echo "^^^^ usize"
./target/release/evm stats --code $CODE1
echo "^^^^ U256"

# RNG TEST
CODE2=6060604052600360056007600b60005b620f4240811215607f5767ffe7649d5eca84179490940267f47ed85c4b9a6379019367f8e5dd9a5c994bba9390930267f91d87e4b8b74e55019267ff97f6f3b29cda529290920267f393ada8dd75c938019167fe8d437c45bb3735830267f47d9a7b5428ffec019150600101600f565b838518831882186000555050505050600680609a6000396000f3606060405200
ethvm --code $CODE2
echo "^^^^ ethvm"
./target/release/evm stats --code $CODE2 --gas 143020115
echo "^^^^ usize"
./target/release/evm stats --code $CODE2
echo "^^^^ U256"
85
evmbin/benches/mod.rs
Normal file
@ -0,0 +1,85 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! benchmarking for EVM
//! should be started with:
//! ```bash
//! multirust run nightly cargo bench
//! ```

#![feature(test)]

extern crate test;
extern crate ethcore;
extern crate evm;
extern crate ethcore_util;
extern crate rustc_serialize;

use self::test::{Bencher, black_box};

use evm::run_vm;
use ethcore::action_params::ActionParams;
use ethcore_util::{U256, Uint};
use rustc_serialize::hex::FromHex;

#[bench]
fn simple_loop_usize(b: &mut Bencher) {
	simple_loop(U256::from(::std::usize::MAX), b)
}

#[bench]
fn simple_loop_u256(b: &mut Bencher) {
	simple_loop(!U256::zero(), b)
}

fn simple_loop(gas: U256, b: &mut Bencher) {
	let code = black_box(
		"606060405260005b620042408112156019575b6001016007565b600081905550600680602b6000396000f3606060405200".from_hex().unwrap()
	);

	b.iter(|| {
		let mut params = ActionParams::default();
		params.gas = gas;
		params.code = Some(code.clone());

		run_vm(params)
	});
}

#[bench]
fn rng_usize(b: &mut Bencher) {
	rng(U256::from(::std::usize::MAX), b)
}

#[bench]
fn rng_u256(b: &mut Bencher) {
	rng(!U256::zero(), b)
}

fn rng(gas: U256, b: &mut Bencher) {
	let code = black_box(
		"6060604052600360056007600b60005b62004240811215607f5767ffe7649d5eca84179490940267f47ed85c4b9a6379019367f8e5dd9a5c994bba9390930267f91d87e4b8b74e55019267ff97f6f3b29cda529290920267f393ada8dd75c938019167fe8d437c45bb3735830267f47d9a7b5428ffec019150600101600f565b838518831882186000555050505050600680609a6000396000f3606060405200".from_hex().unwrap()
	);

	b.iter(|| {
		let mut params = ActionParams::default();
		params.gas = gas;
		params.code = Some(code.clone());

		run_vm(params)
	});
}
@ -17,6 +17,7 @@
 //! Parity EVM interpreter binary.
 
 #![warn(missing_docs)]
+#![allow(dead_code)]
 extern crate ethcore;
 extern crate rustc_serialize;
 extern crate docopt;
@ -25,7 +26,7 @@ extern crate ethcore_util as util;
 
 mod ext;
 
-use std::time::Instant;
+use std::time::{Instant, Duration};
 use std::str::FromStr;
 use docopt::Docopt;
 use util::{U256, FromHex, Uint, Bytes};
@ -58,6 +59,15 @@ fn main() {
 	params.code = Some(args.code());
 	params.data = args.data();
 
+	let result = run_vm(params);
+	println!("Gas used: {:?}", result.gas_used);
+	println!("Output: {:?}", result.output);
+	println!("Time: {}.{:.9}s", result.time.as_secs(), result.time.subsec_nanos());
+}
+
+/// Execute VM with given `ActionParams`
+pub fn run_vm(params: ActionParams) -> ExecutionResults {
+	let initial_gas = params.gas;
 	let factory = Factory::new(VMType::Interpreter);
 	let mut vm = factory.create(params.gas);
 	let mut ext = ext::FakeExt::default();
@ -66,9 +76,21 @@ fn main() {
 	let gas_left = vm.exec(params, &mut ext).finalize(ext).expect("OK");
 	let duration = start.elapsed();
 
-	println!("Gas used: {:?}", args.gas() - gas_left);
-	println!("Output: {:?}", "");
-	println!("Time: {}.{:.9}s", duration.as_secs(), duration.subsec_nanos());
+	ExecutionResults {
+		gas_used: initial_gas - gas_left,
+		output: Vec::new(),
+		time: duration,
+	}
+}
+
+/// VM execution results
+pub struct ExecutionResults {
+	/// Used gas
+	pub gas_used: U256,
+	/// Output as bytes
+	pub output: Vec<u8>,
+	/// Time Taken
+	pub time: Duration,
 }
 
 #[derive(Debug, RustcDecodable)]
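With `run_vm` now public and returning `ExecutionResults`, external callers can drive the interpreter the same way the benchmarks earlier in this diff do. The sketch below is illustrative only; it assumes the `evm` lib/bin layout added in the evmbin Cargo.toml hunk above and the same crate APIs the benchmarks use, and the bytecode is just a single STOP opcode.

	extern crate evm;
	extern crate ethcore;
	extern crate ethcore_util;

	use evm::run_vm;
	use ethcore::action_params::ActionParams;
	use ethcore_util::U256;

	fn main() {
		// 0x00 is the EVM STOP opcode; any bytecode would do here.
		let code = vec![0x00];

		let mut params = ActionParams::default();
		params.gas = U256::from(100_000);
		params.code = Some(code);

		let result = run_vm(params);
		println!("gas used: {:?}", result.gas_used);
		println!("wall time: {}.{:09}s", result.time.as_secs(), result.time.subsec_nanos());
	}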
@ -62,24 +62,10 @@ pub fn expand_ipc_implementation(
 	};
 
 	push_client(cx, &builder, &interface_map, push);
-	push_handshake_struct(cx, push);
-
 	push(Annotatable::Item(interface_map.item));
 }
 
-fn push_handshake_struct(cx: &ExtCtxt, push: &mut FnMut(Annotatable)) {
-	let handshake_item = quote_item!(cx,
-		#[derive(Binary)]
-		pub struct BinHandshake {
-			api_version: String,
-			protocol_version: String,
-			reserved: Vec<u8>,
-		}
-	).unwrap();
-
-	push(Annotatable::Item(handshake_item));
-}
-
 fn field_name(builder: &aster::AstBuilder, arg: &Arg) -> ast::Ident {
 	match arg.pat.node {
 		PatKind::Ident(_, ref ident, _) => builder.id(ident.node),
@ -601,15 +587,14 @@ fn push_client_implementation(
 
 	let handshake_item = quote_impl_item!(cx,
 		pub fn handshake(&self) -> Result<(), ::ipc::Error> {
-			let payload = BinHandshake {
-				protocol_version: $item_ident::protocol_version().to_string(),
-				api_version: $item_ident::api_version().to_string(),
-				reserved: vec![0u8; 64],
+			let payload = ::ipc::Handshake {
+				protocol_version: $item_ident::protocol_version(),
+				api_version: $item_ident::api_version(),
 			};
 
 			::ipc::invoke(
 				0,
-				&Some(::ipc::binary::serialize(&payload).unwrap()),
+				&Some(::ipc::binary::serialize(&::ipc::BinHandshake::from(payload)).unwrap()),
 				&mut *self.socket.write().unwrap());
 
 			let mut result = vec![0u8; 1];
@ -673,18 +658,15 @@ fn implement_handshake_arm(
 ) -> (ast::Arm, ast::Arm)
 {
 	let handshake_deserialize = quote_stmt!(&cx,
-		let handshake_payload = ::ipc::binary::deserialize_from::<BinHandshake, _>(r).unwrap();
+		let handshake_payload = ::ipc::binary::deserialize_from::<::ipc::BinHandshake, _>(r).unwrap();
 	);
 
 	let handshake_deserialize_buf = quote_stmt!(&cx,
-		let handshake_payload = ::ipc::binary::deserialize::<BinHandshake>(buf).unwrap();
+		let handshake_payload = ::ipc::binary::deserialize::<::ipc::BinHandshake>(buf).unwrap();
 	);
 
 	let handshake_serialize = quote_expr!(&cx,
-		::ipc::binary::serialize::<bool>(&Self::handshake(&::ipc::Handshake {
-			api_version: ::semver::Version::parse(&handshake_payload.api_version).unwrap(),
-			protocol_version: ::semver::Version::parse(&handshake_payload.protocol_version).unwrap(),
-		})).unwrap()
+		::ipc::binary::serialize::<bool>(&Self::handshake(&handshake_payload.to_semver())).unwrap()
 	);
 
 	(
@ -8,6 +8,6 @@ license = "GPL-3.0"
 
 [dependencies]
 ethcore-devtools = { path = "../../devtools" }
-semver = "0.2.0"
 nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
 ethcore-util = { path = "../../util" }
+semver = "0.2"
@ -21,6 +21,7 @@ use util::numbers::{U256, U512, H256, H2048, Address};
 use std::mem;
 use std::collections::{VecDeque, BTreeMap};
 use std::ops::Range;
+use super::Handshake;
 
 #[derive(Debug)]
 pub struct BinaryConvertError;
@ -554,6 +555,61 @@ macro_rules! binary_fixed_size {
 	}
 }
 
+/// Fixed-sized version of Handshake struct
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct BinHandshake {
+	api_version: BinVersion,
+	protocol_version: BinVersion,
+}
+
+/// Shorten version of semver Version without `pre` and `build` information
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct BinVersion {
+	pub major: u64,
+	pub minor: u64,
+	pub patch: u64,
+}
+
+impl From<Handshake> for BinHandshake {
+	fn from(other: Handshake) -> Self {
+		BinHandshake {
+			api_version: BinVersion::from(other.api_version),
+			protocol_version: BinVersion::from(other.protocol_version),
+		}
+	}
+}
+
+impl BinHandshake {
+	pub fn to_semver(self) -> Handshake {
+		Handshake {
+			api_version: self.api_version.to_semver(),
+			protocol_version: self.protocol_version.to_semver(),
+		}
+	}
+}
+
+impl BinVersion {
+	pub fn to_semver(self) -> ::semver::Version {
+		::semver::Version {
+			major: self.major,
+			minor: self.minor,
+			patch: self.patch,
+			pre: vec![],
+			build: vec![],
+		}
+	}
+}
+
+impl From<::semver::Version> for BinVersion {
+	fn from(other: ::semver::Version) -> Self {
+		BinVersion {
+			major: other.major,
+			minor: other.minor,
+			patch: other.patch,
+		}
+	}
+}
+
 binary_fixed_size!(u64);
 binary_fixed_size!(u32);
 binary_fixed_size!(usize);
@ -564,6 +620,7 @@ binary_fixed_size!(U512);
 binary_fixed_size!(H256);
 binary_fixed_size!(H2048);
 binary_fixed_size!(Address);
+binary_fixed_size!(BinHandshake);
 
 #[test]
 fn vec_serialize() {
@ -706,8 +763,6 @@ fn serialize_opt_vec() {
 
 #[test]
 fn serialize_opt_vec_payload() {
-	use std::io::Cursor;
-
 	let optional_vec: Option<Vec<u8>> = None;
 	let payload = serialize(&optional_vec).unwrap();
 
@ -776,3 +831,23 @@ fn serialize_btree() {
 
 	assert_eq!(res[&1u64], 5u64);
 }
+
+#[test]
+fn serialize_handshake() {
+	use std::io::{Cursor, SeekFrom, Seek};
+
+	let mut buff = Cursor::new(Vec::new());
+
+	let handshake = Handshake {
+		api_version: ::semver::Version::parse("1.2.0").unwrap(),
+		protocol_version: ::semver::Version::parse("1.2.0").unwrap(),
+	};
+
+	serialize_into(&BinHandshake::from(handshake.clone()), &mut buff).unwrap();
+
+	buff.seek(SeekFrom::Start(0)).unwrap();
+	let res = deserialize_from::<BinHandshake, _>(&mut buff).unwrap().to_semver();
+
+	assert_eq!(res, handshake);
+}
@ -20,6 +20,7 @@ use std::io::{Read, Write};
 use std::marker::Sync;
 use semver::Version;
 
+#[derive(Debug, PartialEq, Eq, Clone)]
 /// Handshake for client and server to negotiate api/protocol version
 pub struct Handshake {
 	pub protocol_version: Version,
@ -24,4 +24,4 @@ extern crate ethcore_util as util;
 pub mod interface;
 pub mod binary;
 pub use interface::{IpcInterface, IpcSocket, invoke, IpcConfig, Handshake, Error, WithSocket};
-pub use binary::{BinaryConvertable, BinaryConvertError};
+pub use binary::{BinaryConvertable, BinaryConvertError, BinHandshake};
@ -10,7 +10,7 @@ rustc-serialize = "0.3"
 serde = "0.7.0"
 serde_json = "0.7.0"
 serde_macros = { version = "0.7.0", optional = true }
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
 
 [build-dependencies]
 serde_codegen = { version = "0.7.0", optional = true }
@ -40,9 +40,9 @@ impl Into<Vec<u8>> for Bytes {
 }
 
 impl Deref for Bytes {
-	type Target = Vec<u8>;
+	type Target = [u8];
 
-	fn deref(&self) -> &Vec<u8> {
+	fn deref(&self) -> &[u8] {
 		&self.0
 	}
 }
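The change above switches the `Deref` target of the RPC `Bytes` newtype from `Vec<u8>` to `[u8]`, so borrowers see a slice rather than the backing container. The following is a self-contained sketch of that newtype pattern with a hypothetical `RawBytes` wrapper, not the RPC type itself.

	use std::ops::Deref;

	struct RawBytes(Vec<u8>);

	impl Deref for RawBytes {
		type Target = [u8];

		fn deref(&self) -> &[u8] {
			&self.0
		}
	}

	fn hex_len(data: &[u8]) -> usize {
		data.len() * 2
	}

	fn main() {
		let payload = RawBytes(vec![0xde, 0xad, 0xbe, 0xef]);

		// Deref coercion lets the wrapper be passed wherever a byte slice is expected.
		assert_eq!(hex_len(&payload), 8);

		// Slice methods are available directly through auto-deref.
		assert_eq!(payload.first(), Some(&0xde));
		println!("{} bytes", payload.len());
	}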
@ -238,7 +238,7 @@ impl Configuration {
 				})
 			}).collect(),
 			Some(_) => Vec::new(),
-			None => spec.nodes().clone(),
+			None => spec.nodes().to_owned(),
 		}
 	}
 
@ -21,8 +21,8 @@ use self::ansi_term::Style;
 use std::time::{Instant, Duration};
 use std::sync::RwLock;
 use std::ops::{Deref, DerefMut};
-use ethsync::{EthSync, SyncProvider};
-use util::{Uint, RwLockable, NetworkService};
+use ethsync::SyncStatus;
+use util::{Uint, RwLockable, NetworkConfiguration};
 use ethcore::client::*;
 use number_prefix::{binary_prefix, Standalone, Prefixed};
 
@ -75,7 +75,8 @@ impl Informant {
 		}
 	}
 
-	pub fn tick<Message>(&self, client: &Client, maybe_sync: Option<(&EthSync, &NetworkService<Message>)>) where Message: Send + Sync + Clone + 'static {
+	#[cfg_attr(feature="dev", allow(match_bool))]
+	pub fn tick(&self, client: &Client, maybe_status: Option<(SyncStatus, NetworkConfiguration)>) {
 		let elapsed = self.last_tick.unwrapped_read().elapsed();
 		if elapsed < Duration::from_secs(5) {
 			return;
@ -108,10 +109,8 @@ impl Informant {
 			paint(Yellow.bold(), format!("{:4}", ((report.transactions_applied - last_report.transactions_applied) * 1000) as u64 / elapsed.as_milliseconds())),
 			paint(Yellow.bold(), format!("{:3}", ((report.gas_processed - last_report.gas_processed) / From::from(elapsed.as_milliseconds() * 1000)).low_u64())),
 
-			match maybe_sync {
-				Some((sync, net)) => {
-					let sync_info = sync.status();
-					let net_config = net.config();
+			match maybe_status {
+				Some((ref sync_info, ref net_config)) => {
 					format!("{}/{}/{} peers {} ",
 						paint(Green.bold(), format!("{:2}", sync_info.num_active_peers)),
 						paint(Green.bold(), format!("{:2}", sync_info.num_peers)),
@ -128,13 +127,9 @@ impl Informant {
 			paint(Purple.bold(), format!("{:>8}", Informant::format_bytes(report.state_db_mem))),
 			paint(Purple.bold(), format!("{:>8}", Informant::format_bytes(cache_info.total()))),
 			paint(Purple.bold(), format!("{:>8}", Informant::format_bytes(queue_info.mem_used))),
-			match maybe_sync {
-				Some((sync, _)) => {
-					let sync_info = sync.status();
-					format!(" {} sync", paint(Purple.bold(), format!("{:>8}", Informant::format_bytes(sync_info.mem_used))))
-				}
-				None => String::new()
-			},
+			if let Some((ref sync_info, _)) = maybe_status {
+				format!(" {} sync", paint(Purple.bold(), format!("{:>8}", Informant::format_bytes(sync_info.mem_used))))
+			} else { String::new() },
 		);
 	}
 
@ -14,12 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::sync::{Arc, Weak};
+use std::sync::Arc;
 use ethcore::client::Client;
-use ethcore::service::{NetSyncMessage, SyncMessage};
-use ethsync::EthSync;
+use ethcore::service::ClientIoMessage;
+use ethsync::{EthSync, SyncProvider, ManageNetwork};
 use ethcore::account_provider::AccountProvider;
-use util::{TimerToken, IoHandler, IoContext, NetworkService, NetworkIoMessage};
+use util::{TimerToken, IoHandler, IoContext};
 
 use informant::Informant;
 
@ -30,38 +30,18 @@ pub struct ClientIoHandler {
 	pub sync: Arc<EthSync>,
 	pub accounts: Arc<AccountProvider>,
 	pub info: Informant,
-	pub network: Weak<NetworkService<SyncMessage>>,
 }
 
-impl IoHandler<NetSyncMessage> for ClientIoHandler {
-	fn initialize(&self, io: &IoContext<NetSyncMessage>) {
+impl IoHandler<ClientIoMessage> for ClientIoHandler {
	fn initialize(&self, io: &IoContext<ClientIoMessage>) {
 		io.register_timer(INFO_TIMER, 5000).expect("Error registering timer");
 	}
 
-	fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) {
+	fn timeout(&self, _io: &IoContext<ClientIoMessage>, timer: TimerToken) {
 		if let INFO_TIMER = timer {
-			if let Some(net) = self.network.upgrade() {
-				self.info.tick(&self.client, Some((&self.sync, &net)));
-			}
-		}
-	}
-
-	fn message(&self, _io: &IoContext<NetSyncMessage>, message: &NetSyncMessage) {
-		match *message {
-			NetworkIoMessage::User(SyncMessage::StartNetwork) => {
-				if let Some(network) = self.network.upgrade() {
-					network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e));
-					EthSync::register(&*network, self.sync.clone()).unwrap_or_else(|e| warn!("Error registering eth protocol handler: {}", e));
-				}
-			},
-			NetworkIoMessage::User(SyncMessage::StopNetwork) => {
-				if let Some(network) = self.network.upgrade() {
-					network.stop().unwrap_or_else(|e| warn!("Error stopping network: {:?}", e));
-				}
-			},
-			_ => {/* Ignore other messages */},
+			let sync_status = self.sync.status();
+			let network_config = self.sync.network_config();
+			self.info.tick(&self.client, Some((sync_status, network_config)));
 		}
 	}
 }
@ -46,16 +46,14 @@ extern crate hyper; // for price_info.rs
 extern crate json_ipc_server as jsonipc;
 
 extern crate ethcore_ipc_hypervisor as hypervisor;
 
-#[cfg(feature = "rpc")]
 extern crate ethcore_rpc;
+extern crate ethcore_signer;
+extern crate ansi_term;
 
 #[cfg(feature = "dapps")]
 extern crate ethcore_dapps;
 
-#[cfg(feature = "ethcore-signer")]
-extern crate ethcore_signer;
-
 #[macro_use]
 mod die;
 mod upgrade;
@ -81,9 +79,10 @@ use std::thread::sleep;
 use std::time::Duration;
 use rustc_serialize::hex::FromHex;
 use ctrlc::CtrlC;
-use util::{Lockable, H256, ToPretty, NetworkConfiguration, PayloadInfo, Bytes, UtilError, Colour, Applyable, version, journaldb};
+use util::{Lockable, H256, ToPretty, PayloadInfo, Bytes, Colour, Applyable, version, journaldb};
 use util::panics::{MayPanic, ForwardPanic, PanicHandler};
-use ethcore::client::{Mode, BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError};
+use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError,
+	ChainNotify, Mode};
 use ethcore::error::{ImportError};
 use ethcore::service::ClientService;
 use ethcore::spec::Spec;
@ -231,13 +230,11 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 	miner.set_transactions_limit(conf.args.flag_tx_queue_size);
 
 	// Build client
-	let mut service = ClientService::start(
+	let service = ClientService::start(
 		client_config,
 		spec,
-		net_settings,
 		Path::new(&conf.path()),
 		miner.clone(),
-		match conf.mode() { Mode::Dark(..) => false, _ => !conf.args.flag_no_network }
 	).unwrap_or_else(|e| die_with_error("Client", e));
 
 	panic_handler.forward_from(&service);
@ -247,8 +244,14 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 	let network_settings = Arc::new(conf.network_settings());
 
 	// Sync
-	let sync = EthSync::new(sync_config, client.clone());
-	EthSync::register(&*service.network(), sync.clone()).unwrap_or_else(|e| die_with_error("Error registering eth protocol handler", UtilError::from(e).into()));
+	let sync = EthSync::new(sync_config, client.clone(), net_settings)
+		.unwrap_or_else(|e| die_with_error("Sync", ethcore::error::Error::Util(e)));
+	service.set_notify(&(sync.clone() as Arc<ChainNotify>));
+
+	// if network is active by default
+	if match conf.mode() { Mode::Dark(..) => false, _ => !conf.args.flag_no_network } {
+		sync.start();
+	}
 
 	let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies {
 		signer_port: conf.signer_port(),
@ -261,7 +264,7 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 		logger: logger.clone(),
 		settings: network_settings.clone(),
 		allow_pending_receipt_query: !conf.args.flag_geth,
-		net_service: service.network(),
+		net_service: sync.clone(),
 	});
 
 	let dependencies = rpc::Dependencies {
@ -311,7 +314,6 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 		info: Informant::new(conf.have_color()),
 		sync: sync.clone(),
 		accounts: account_service.clone(),
-		network: Arc::downgrade(&service.network()),
 	});
 	service.register_io_handler(io_handler).expect("Error registering IO handler");
 
@ -345,24 +347,11 @@ fn execute_export(conf: Configuration) {
 	unsafe { ::fdlimit::raise_fd_limit(); }
 
 	let spec = conf.spec();
-	let net_settings = NetworkConfiguration {
-		config_path: None,
-		listen_address: None,
-		public_address: None,
-		udp_port: None,
-		nat_enabled: false,
-		discovery_enabled: false,
-		boot_nodes: Vec::new(),
-		use_secret: None,
-		ideal_peers: 0,
-		reserved_nodes: Vec::new(),
-		non_reserved_mode: ::util::network::NonReservedPeerMode::Accept,
-	};
 	let client_config = conf.client_config(&spec);
 
 	// Build client
 	let service = ClientService::start(
-		client_config, spec, net_settings, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec())), false
+		client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec()))
 	).unwrap_or_else(|e| die_with_error("Client", e));
 
 	panic_handler.forward_from(&service);
@ -419,24 +408,11 @@ fn execute_import(conf: Configuration) {
 	unsafe { ::fdlimit::raise_fd_limit(); }
 
 	let spec = conf.spec();
-	let net_settings = NetworkConfiguration {
-		config_path: None,
-		listen_address: None,
-		public_address: None,
-		udp_port: None,
-		nat_enabled: false,
-		discovery_enabled: false,
-		boot_nodes: Vec::new(),
-		use_secret: None,
-		ideal_peers: 0,
-		reserved_nodes: Vec::new(),
-		non_reserved_mode: ::util::network::NonReservedPeerMode::Accept,
-	};
 	let client_config = conf.client_config(&spec);
 
 	// Build client
 	let service = ClientService::start(
-		client_config, spec, net_settings, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec())), false
+		client_config, spec, Path::new(&conf.path()), Arc::new(Miner::with_spec(conf.spec()))
 	).unwrap_or_else(|e| die_with_error("Client", e));
 
 	panic_handler.forward_from(&service);
@ -485,7 +461,7 @@ fn execute_import(conf: Configuration) {
 			Err(BlockImportError::Import(ImportError::AlreadyInChain)) => { trace!("Skipping block already in chain."); }
 			Err(e) => die!("Cannot import block: {:?}", e)
 		}
-		informant.tick::<&'static ()>(client.deref(), None);
+		informant.tick(client.deref(), None);
 	};
 
 	match format {
@ -157,7 +157,7 @@ fn extras_database_migrations() -> Result<MigrationManager, Error> {
 fn state_database_migrations(pruning: Algorithm) -> Result<MigrationManager, Error> {
 	let mut manager = MigrationManager::new(default_migration_settings());
 	let res = match pruning {
-		Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7),
+		Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()),
 		Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()),
 		_ => die!("Unsupported pruning method for migration. Delete DB and resync"),
 	};
@ -24,12 +24,8 @@ use jsonipc;
 use rpc_apis;
 use std::fmt;
 
-#[cfg(feature = "rpc")]
 pub use ethcore_rpc::Server as RpcServer;
-#[cfg(feature = "rpc")]
 use ethcore_rpc::{RpcServerError, RpcServer as Server};
-#[cfg(not(feature = "rpc"))]
-pub struct RpcServer;
 
 pub struct HttpConfiguration {
 	pub enabled: bool,
@ -79,17 +75,6 @@ fn setup_rpc_server(apis: Vec<&str>, deps: &Dependencies) -> Server {
 	rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::List(apis))
 }
 
-#[cfg(not(feature = "rpc"))]
-pub fn setup_http_rpc_server(
-	_deps: &Dependencies,
-	_url: &SocketAddr,
-	_cors_domain: Vec<String>,
-	_apis: Vec<&str>,
-) -> ! {
-	die!("Your Parity version has been compiled without JSON-RPC support.")
-}
-
-#[cfg(feature = "rpc")]
 pub fn setup_http_rpc_server(
 	dependencies: &Dependencies,
 	url: &SocketAddr,
@ -111,18 +96,12 @@ pub fn setup_http_rpc_server(
 	}
 }
 
-#[cfg(not(feature = "rpc"))]
-pub fn setup_ipc_rpc_server(_dependencies: &Dependencies, _addr: &str, _apis: Vec<&str>) -> ! {
-	die!("Your Parity version has been compiled without JSON-RPC support.")
-}
-
 pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Option<jsonipc::Server> {
 	if !conf.enabled { return None; }
 	let apis = conf.apis.split(',').collect();
 	Some(setup_ipc_rpc_server(deps, &conf.socket_addr, apis))
 }
 
-#[cfg(feature = "rpc")]
 pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: Vec<&str>) -> jsonipc::Server {
 	let server = setup_rpc_server(apis, dependencies);
 	match server.start_ipc(addr) {
@ -18,21 +18,15 @@ use std::collections::BTreeMap;
 use std::str::FromStr;
 use std::sync::Arc;
 
-use ethsync::EthSync;
+use ethsync::{EthSync, ManageNetwork};
 use ethcore::miner::{Miner, ExternalMiner};
 use ethcore::client::Client;
 use util::RotatingLogger;
 use ethcore::account_provider::AccountProvider;
 use util::network_settings::NetworkSettings;
-use util::network::NetworkService;
 
-#[cfg(feature="rpc")]
 pub use ethcore_rpc::ConfirmationsQueue;
-#[cfg(not(feature="rpc"))]
-#[derive(Default)]
-pub struct ConfirmationsQueue;
-
-#[cfg(feature="rpc")]
 use ethcore_rpc::Extendable;
 
 pub enum Api {
@ -89,7 +83,7 @@ pub struct Dependencies {
 	pub logger: Arc<RotatingLogger>,
 	pub settings: Arc<NetworkSettings>,
 	pub allow_pending_receipt_query: bool,
-	pub net_service: Arc<NetworkService<::ethcore::service::SyncMessage>>,
+	pub net_service: Arc<ManageNetwork>,
 }
 
 fn to_modules(apis: &[Api]) -> BTreeMap<String, String> {
@ -15,23 +15,19 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::io;
-use std::path::PathBuf;
 use std::sync::Arc;
-use util::{Colour, Applyable};
-use util::panics::{PanicHandler, ForwardPanic};
+use std::path::PathBuf;
+use ansi_term::Colour;
+use util::panics::{ForwardPanic, PanicHandler};
 use util::path::restrict_permissions_owner;
-use die::*;
+use util::Applyable;
 use rpc_apis;
 
-const CODES_FILENAME: &'static str = "authcodes";
-
-#[cfg(feature = "ethcore-signer")]
 use ethcore_signer as signer;
-#[cfg(feature = "ethcore-signer")]
+use die::*;
 
 pub use ethcore_signer::Server as SignerServer;
 
-#[cfg(not(feature = "ethcore-signer"))]
-pub struct SignerServer;
+const CODES_FILENAME: &'static str = "authcodes";
 
 pub struct Configuration {
 	pub enabled: bool,
@ -59,8 +55,6 @@ fn codes_path(path: String) -> PathBuf {
 	p
 }
 
-
-#[cfg(feature = "ethcore-signer")]
 pub fn new_token(path: String) -> io::Result<()> {
 	let path = codes_path(path);
 	let mut codes = try!(signer::AuthCodes::from_file(&path));
@ -70,7 +64,6 @@ pub fn new_token(path: String) -> io::Result<()> {
 	Ok(())
 }
 
-#[cfg(feature = "ethcore-signer")]
 fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer {
 	let addr = format!("127.0.0.1:{}", conf.port).parse().unwrap_or_else(|_| {
 		die!("Invalid port specified: {}", conf.port)
@ -95,13 +88,4 @@ fn do_start(conf: Configuration, deps: Dependencies) -> SignerServer {
 	}
 }
 
-#[cfg(not(feature = "ethcore-signer"))]
-fn do_start(_conf: Configuration) -> ! {
-	die!("Your Parity version has been compiled without Trusted Signer support.")
-}
-
-#[cfg(not(feature = "ethcore-signer"))]
-pub fn new_token(_path: String) -> ! {
-	die!("Your Parity version has been compiled without Trusted Signer support.")
-}
-
@ -23,7 +23,7 @@ ethcore-devtools = { path = "../devtools" }
 rustc-serialize = "0.3"
 transient-hashmap = "0.1"
 serde_macros = { version = "0.7.0", optional = true }
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
 json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
 ethcore-ipc = { path = "../ipc/rpc" }
 
@ -508,7 +508,7 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 	fn compilers(&self, params: Params) -> Result<Value, Error> {
 		try!(self.active());
 		match params {
-			Params::None => to_value(&vec![] as &Vec<String>),
+			Params::None => to_value(&(&[] as &[String])),
 			_ => Err(Error::invalid_params())
 		}
 	}
@ -19,26 +19,26 @@ use std::sync::{Arc, Weak};
 use jsonrpc_core::*;
 use ethcore::miner::MinerService;
 use ethcore::client::MiningBlockChainClient;
-use ethcore::service::SyncMessage;
-use util::network::{NetworkService, NonReservedPeerMode};
+use ethsync::ManageNetwork;
+use util::network::NonReservedPeerMode;
 use v1::traits::EthcoreSet;
 use v1::types::{Bytes, H160, U256};
 
 /// Ethcore-specific rpc interface for operations altering the settings.
 pub struct EthcoreSetClient<C, M> where
 	C: MiningBlockChainClient,
-	M: MinerService {
+	M: MinerService
+{
 	client: Weak<C>,
 	miner: Weak<M>,
-	net: Weak<NetworkService<SyncMessage>>,
+	net: Weak<ManageNetwork>,
 }
 
 impl<C, M> EthcoreSetClient<C, M> where
 	C: MiningBlockChainClient,
 	M: MinerService {
 	/// Creates new `EthcoreSetClient`.
 	pub fn new(client: &Arc<C>, miner: &Arc<M>, net: &Arc<NetworkService<SyncMessage>>) -> Self {
|
pub fn new(client: &Arc<C>, miner: &Arc<M>, net: &Arc<ManageNetwork>) -> Self {
|
||||||
EthcoreSetClient {
|
EthcoreSetClient {
|
||||||
client: Arc::downgrade(client),
|
client: Arc::downgrade(client),
|
||||||
miner: Arc::downgrade(miner),
|
miner: Arc::downgrade(miner),
|
||||||
@ -144,4 +144,14 @@ impl<C, M> EthcoreSet for EthcoreSetClient<C, M> where
|
|||||||
take_weak!(self.net).set_non_reserved_mode(NonReservedPeerMode::Accept);
|
take_weak!(self.net).set_non_reserved_mode(NonReservedPeerMode::Accept);
|
||||||
to_value(&true)
|
to_value(&true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn start_network(&self, _: Params) -> Result<Value, Error> {
|
||||||
|
take_weak!(self.net).start_network();
|
||||||
|
Ok(Value::Bool(true))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stop_network(&self, _: Params) -> Result<Value, Error> {
|
||||||
|
take_weak!(self.net).stop_network();
|
||||||
|
Ok(Value::Bool(true))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
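The hunks above swap the RPC module's dependency from the concrete `NetworkService<SyncMessage>` to the `ManageNetwork` trait object, so `EthcoreSetClient` only needs the narrow start/stop/reserved-peer interface. A minimal, self-contained sketch of that pattern follows; every type here is a simplified stand-in, not the real Parity API.

```rust
use std::sync::{Arc, Weak};

/// Narrow interface the RPC layer depends on (stand-in for ethsync::ManageNetwork).
trait ManageNetwork: Send + Sync {
    fn start_network(&self);
    fn stop_network(&self);
}

struct NoopNetwork;
impl ManageNetwork for NoopNetwork {
    fn start_network(&self) { println!("network started"); }
    fn stop_network(&self) { println!("network stopped"); }
}

/// RPC-style client that keeps only a weak handle, mirroring EthcoreSetClient.
struct SetClient {
    net: Weak<dyn ManageNetwork>,
}

impl SetClient {
    fn new(net: &Arc<dyn ManageNetwork>) -> Self {
        SetClient { net: Arc::downgrade(net) }
    }

    fn start_network(&self) -> Result<bool, &'static str> {
        // Upgrade the weak handle on each call, like the take_weak! macro above.
        let net = self.net.upgrade().ok_or("network service dropped")?;
        net.start_network();
        Ok(true)
    }
}

fn main() {
    let net: Arc<dyn ManageNetwork> = Arc::new(NoopNetwork);
    let client = SetClient::new(&net);
    assert_eq!(client.start_network(), Ok(true));
}
```

Holding a `Weak` handle matches the `Arc::downgrade(client)` calls above: the RPC layer never keeps the network service alive on its own.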
@@ -48,13 +48,4 @@ impl<S> Net for NetClient<S> where S: SyncProvider + 'static {
Ok(Value::Bool(true))
}

-fn start_network(&self, _: Params) -> Result<Value, Error> {
-take_weak!(self.sync).start_network();
-Ok(Value::Bool(true))
-}

-fn stop_network(&self, _: Params) -> Result<Value, Error> {
-take_weak!(self.sync).stop_network();
-Ok(Value::Bool(true))
-}
}

@@ -59,11 +59,5 @@ impl SyncProvider for TestSyncProvider {
fn status(&self) -> SyncStatus {
self.status.unwrapped_read().clone()
}

-fn start_network(&self) {
-}

-fn stop_network(&self) {
-}
}

@@ -19,12 +19,12 @@ use std::str::FromStr;
use jsonrpc_core::IoHandler;
use v1::{EthcoreSet, EthcoreSetClient};
use ethcore::miner::MinerService;
-use ethcore::service::SyncMessage;
use ethcore::client::TestBlockChainClient;
use v1::tests::helpers::TestMinerService;
use util::numbers::*;
-use util::network::{NetworkConfiguration, NetworkService};
use rustc_serialize::hex::FromHex;
+use super::manage_network::TestManageNetwork;
+use ethsync::ManageNetwork;

fn miner_service() -> Arc<TestMinerService> {
Arc::new(TestMinerService::default())
@@ -34,12 +34,12 @@ fn client_service() -> Arc<TestBlockChainClient> {
Arc::new(TestBlockChainClient::default())
}

-fn network_service() -> Arc<NetworkService<SyncMessage>> {
+fn network_service() -> Arc<TestManageNetwork> {
-Arc::new(NetworkService::new(NetworkConfiguration::new()).unwrap())
+Arc::new(TestManageNetwork)
}

-fn ethcore_set_client(client: &Arc<TestBlockChainClient>, miner: &Arc<TestMinerService>, net: &Arc<NetworkService<SyncMessage>>) -> EthcoreSetClient<TestBlockChainClient, TestMinerService> {
+fn ethcore_set_client(client: &Arc<TestBlockChainClient>, miner: &Arc<TestMinerService>, net: &Arc<TestManageNetwork>) -> EthcoreSetClient<TestBlockChainClient, TestMinerService> {
-EthcoreSetClient::new(client, miner, net)
+EthcoreSetClient::new(client, miner, &(net.clone() as Arc<ManageNetwork>))
}

#[test]

rpc/src/v1/tests/mocked/manage_network.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use ethsync::ManageNetwork;
+use util::network::NetworkConfiguration;
+
+pub struct TestManageNetwork;
+
+// TODO: rob, gavin (originally introduced this functions) - proper tests and test state
+impl ManageNetwork for TestManageNetwork {
+fn set_non_reserved_mode(&self, _mode: ::util::network::NonReservedPeerMode) {}
+fn remove_reserved_peer(&self, _peer: &str) -> Result<(), String> { Ok(()) }
+fn add_reserved_peer(&self, _peer: &str) -> Result<(), String> { Ok(()) }
+fn start_network(&self) {}
+fn stop_network(&self) {}
+fn network_config(&self) -> NetworkConfiguration { NetworkConfiguration::new_local() }
+}

@@ -26,3 +26,4 @@ mod personal_signer;
mod ethcore;
mod ethcore_set;
mod rpc;
+mod manage_network;

@@ -55,6 +55,12 @@ pub trait EthcoreSet: Sized + Send + Sync + 'static {
/// Accept non-reserved peers (default behavior)
fn accept_non_reserved_peers(&self, _: Params) -> Result<Value, Error>;

+/// Start the network.
+fn start_network(&self, _: Params) -> Result<Value, Error>;

+/// Stop the network.
+fn stop_network(&self, _: Params) -> Result<Value, Error>;

/// Should be used to convert object to io delegate.
fn to_delegate(self) -> IoDelegate<Self> {
let mut delegate = IoDelegate::new(Arc::new(self));

@@ -30,12 +30,6 @@ pub trait Net: Sized + Send + Sync + 'static {
/// Otherwise false.
fn is_listening(&self, _: Params) -> Result<Value, Error>;

-/// Start the network.
-fn start_network(&self, _: Params) -> Result<Value, Error>;

-/// Stop the network.
-fn stop_network(&self, _: Params) -> Result<Value, Error>;

/// Should be used to convert object to io delegate.
fn to_delegate(self) -> IoDelegate<Self> {
let mut delegate = IoDelegate::new(Arc::new(self));

@@ -18,9 +18,9 @@ env_logger = "0.3"
ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "stable" }
ethcore-util = { path = "../util" }
ethcore-rpc = { path = "../rpc" }
-parity-minimal-sysui = { git = "https://github.com/ethcore/parity-dapps-minimal-sysui-rs.git", version = "0.2.0" }
+parity-dapps-signer = { git = "https://github.com/ethcore/parity-ui.git", version = "0.2.0" }

-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}

[features]
dev = ["clippy"]

@@ -51,7 +51,7 @@ extern crate ethcore_util as util;
extern crate ethcore_rpc as rpc;
extern crate jsonrpc_core;
extern crate ws;
-extern crate parity_minimal_sysui as sysui;
+extern crate parity_dapps_signer as signer;

mod authcode_store;
mod ws_server;

@@ -17,7 +17,7 @@
//! Session handlers factory.

use ws;
-use sysui;
+use signer;
use authcode_store::AuthCodes;
use std::path::{PathBuf, Path};
use std::sync::Arc;
@@ -25,11 +25,11 @@ use std::str::FromStr;
use jsonrpc_core::IoHandler;
use util::H256;

-fn origin_is_allowed(self_origin: &str, header: Option<&Vec<u8>>) -> bool {
+fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
match header {
None => false,
Some(h) => {
-let v = String::from_utf8(h.clone()).ok();
+let v = String::from_utf8(h.to_owned()).ok();
match v {
Some(ref origin) if origin.starts_with("chrome-extension://") => true,
Some(ref origin) if origin.starts_with(self_origin) => true,
@@ -84,8 +84,8 @@ pub struct Session {

impl ws::Handler for Session {
fn on_request(&mut self, req: &ws::Request) -> ws::Result<(ws::Response)> {
-let origin = req.header("origin").or_else(|| req.header("Origin"));
+let origin = req.header("origin").or_else(|| req.header("Origin")).map(|x| &x[..]);
-let host = req.header("host").or_else(|| req.header("Host"));
+let host = req.header("host").or_else(|| req.header("Host")).map(|x| &x[..]);

// Check request origin and host header.
if !origin_is_allowed(&self.self_origin, origin) && !(origin.is_none() && origin_is_allowed(&self.self_origin, host)) {
@@ -111,7 +111,7 @@ impl ws::Handler for Session {
}

// Otherwise try to serve a page.
-Ok(sysui::handle(req.resource())
+Ok(signer::handle(req.resource())
.map_or_else(
// return 404 not found
|| add_headers(ws::Response::not_found("Not found".into()), "text/plain"),
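The signer change above narrows `origin_is_allowed` to take `Option<&[u8]>` and adapts the call sites with `.map(|x| &x[..])`, so owned header buffers can be lent as plain slices. A standalone sketch of the same borrowing pattern, with a simplified check standing in for the real one:

```rust
/// Simplified stand-in for the signer's origin check: accept any byte slice
/// rather than a borrowed Vec, so callers can pass whatever buffer they hold.
fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
    match header.and_then(|h| std::str::from_utf8(h).ok()) {
        Some(origin) => {
            origin.starts_with("chrome-extension://") || origin.starts_with(self_origin)
        }
        None => false,
    }
}

fn main() {
    // A caller holding an owned Vec (as the ws header storage does) adapts with `&v[..]`.
    let header: Option<Vec<u8>> = Some(b"http://127.0.0.1:8180".to_vec());
    let allowed = origin_is_allowed("http://127.0.0.1:8180", header.as_ref().map(|v| &v[..]));
    assert!(allowed);
}
```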
@@ -10,7 +10,7 @@ authors = ["Ethcore <admin@ethcore.io"]
[dependencies]
ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" }
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
log = "0.3"
env_logger = "0.3"
time = "0.1.34"

@@ -16,7 +16,6 @@

use util::{NetworkContext, PeerId, PacketId,};
use util::error::UtilError;
-use ethcore::service::SyncMessage;
use ethcore::client::BlockChainClient;

/// IO interface for the syning handler.
@@ -47,13 +46,13 @@ pub trait SyncIo {

/// Wraps `NetworkContext` and the blockchain client
pub struct NetSyncIo<'s, 'h> where 'h: 's {
-network: &'s NetworkContext<'h, SyncMessage>,
+network: &'s NetworkContext<'h>,
chain: &'s BlockChainClient
}

impl<'s, 'h> NetSyncIo<'s, 'h> {
/// Creates a new instance from the `NetworkContext` and the blockchain client reference.
-pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> {
+pub fn new(network: &'s NetworkContext<'h>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> {
NetSyncIo {
network: network,
chain: chain,

sync/src/lib.rs (186 lines changed)
@@ -34,15 +34,14 @@
//! extern crate ethsync;
//! use std::env;
//! use std::sync::Arc;
-//! use util::network::{NetworkService, NetworkConfiguration};
+//! use util::network::{NetworkConfiguration};
+//! use util::io::IoChannel;
//! use ethcore::client::{Client, ClientConfig};
-//! use ethsync::{EthSync, SyncConfig};
+//! use ethsync::{EthSync, SyncConfig, ManageNetwork};
//! use ethcore::ethereum;
//! use ethcore::miner::{GasPricer, Miner};
//!
//! fn main() {
-//! let mut service = NetworkService::new(NetworkConfiguration::new()).unwrap();
-//! service.start().unwrap();
//! let dir = env::temp_dir();
//! let miner = Miner::new(
//! Default::default(),
@@ -55,10 +54,10 @@
//! ethereum::new_frontier(),
//! &dir,
//! miner,
-//! service.io().channel()
+//! IoChannel::disconnected()
//! ).unwrap();
-//! let sync = EthSync::new(SyncConfig::default(), client);
+//! let sync = EthSync::new(SyncConfig::default(), client, NetworkConfiguration::new()).unwrap();
-//! EthSync::register(&mut service, sync);
+//! sync.start_network();
//! }
//! ```

@@ -75,13 +74,10 @@ extern crate heapsize;

use std::ops::*;
use std::sync::*;
-use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId};
+use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkConfiguration};
-use util::{TimerToken, U256, RwLockable};
+use util::{TimerToken, U256, H256, RwLockable, UtilError};
-use ethcore::client::Client;
+use ethcore::client::{Client, ChainNotify};
-use ethcore::service::{SyncMessage, NetSyncMessage};
use io::NetSyncIo;
-use util::io::IoChannel;
-use util::{NetworkIoMessage, NetworkError};
use chain::ChainSync;

mod chain;
@@ -91,6 +87,9 @@ mod io;
#[cfg(test)]
mod tests;

+/// Ethereum sync protocol
+pub const ETH_PROTOCOL: &'static str = "eth";

/// Sync configuration
pub struct SyncConfig {
/// Max blocks to download ahead
@@ -112,99 +111,142 @@ impl Default for SyncConfig {
pub trait SyncProvider: Send + Sync {
/// Get sync status
fn status(&self) -> SyncStatus;
-/// Start the network
-fn start_network(&self);
-/// Stop the network
-fn stop_network(&self);
}

/// Ethereum network protocol handler
pub struct EthSync {
-/// Shared blockchain client. TODO: this should evetually become an IPC endpoint
+/// Network service
-chain: Arc<Client>,
+network: NetworkService,
-/// Sync strategy
+/// Protocol handler
-sync: RwLock<ChainSync>,
+handler: Arc<SyncProtocolHandler>,
-/// IO communication chnnel.
-io_channel: RwLock<IoChannel<NetSyncMessage>>,
}

pub use self::chain::{SyncStatus, SyncState};

impl EthSync {
/// Creates and register protocol with the network service
-pub fn new(config: SyncConfig, chain: Arc<Client>) -> Arc<EthSync> {
+pub fn new(config: SyncConfig, chain: Arc<Client>, network_config: NetworkConfiguration) -> Result<Arc<EthSync>, UtilError> {
-let sync = ChainSync::new(config, chain.deref());
+let chain_sync = ChainSync::new(config, chain.deref());
-Arc::new(EthSync {
+let service = try!(NetworkService::new(network_config));
-chain: chain,
+let sync = Arc::new(EthSync{
-sync: RwLock::new(sync),
+network: service,
-io_channel: RwLock::new(IoChannel::disconnected()),
+handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain }),
-})
+});
-}

-/// Register protocol with the network service
+Ok(sync)
-pub fn register(service: &NetworkService<SyncMessage>, sync: Arc<EthSync>) -> Result<(), NetworkError> {
-service.register_protocol(sync.clone(), "eth", &[62u8, 63u8])
-}

-/// Stop sync
-pub fn stop(&mut self, io: &mut NetworkContext<SyncMessage>) {
-self.sync.unwrapped_write().abort(&mut NetSyncIo::new(io, self.chain.deref()));
-}

-/// Restart sync
-pub fn restart(&mut self, io: &mut NetworkContext<SyncMessage>) {
-self.sync.unwrapped_write().restart(&mut NetSyncIo::new(io, self.chain.deref()));
}
}

impl SyncProvider for EthSync {
/// Get sync status
fn status(&self) -> SyncStatus {
-self.sync.unwrapped_read().status()
+self.handler.sync.unwrapped_read().status()
-}

-fn start_network(&self) {
-self.io_channel.unwrapped_read().send(NetworkIoMessage::User(SyncMessage::StartNetwork))
-.unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
-}

-fn stop_network(&self) {
-self.io_channel.unwrapped_read().send(NetworkIoMessage::User(SyncMessage::StopNetwork))
-.unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
}
}

-impl NetworkProtocolHandler<SyncMessage> for EthSync {
+struct SyncProtocolHandler {
-fn initialize(&self, io: &NetworkContext<SyncMessage>) {
+/// Shared blockchain client. TODO: this should evetually become an IPC endpoint
+chain: Arc<Client>,
+/// Sync strategy
+sync: RwLock<ChainSync>,
+}

+impl NetworkProtocolHandler for SyncProtocolHandler {
+fn initialize(&self, io: &NetworkContext) {
io.register_timer(0, 1000).expect("Error registering sync timer");
-*self.io_channel.unwrapped_write() = io.io_channel();
}

-fn read(&self, io: &NetworkContext<SyncMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
+fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
-ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, self.chain.deref()) , *peer, packet_id, data);
+ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, self.chain.deref()), *peer, packet_id, data);
}

-fn connected(&self, io: &NetworkContext<SyncMessage>, peer: &PeerId) {
+fn connected(&self, io: &NetworkContext, peer: &PeerId) {
self.sync.unwrapped_write().on_peer_connected(&mut NetSyncIo::new(io, self.chain.deref()), *peer);
}

-fn disconnected(&self, io: &NetworkContext<SyncMessage>, peer: &PeerId) {
+fn disconnected(&self, io: &NetworkContext, peer: &PeerId) {
self.sync.unwrapped_write().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.deref()), *peer);
}

-fn timeout(&self, io: &NetworkContext<SyncMessage>, _timer: TimerToken) {
+fn timeout(&self, io: &NetworkContext, _timer: TimerToken) {
self.sync.unwrapped_write().maintain_peers(&mut NetSyncIo::new(io, self.chain.deref()));
self.sync.unwrapped_write().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref()));
}
+}

-#[cfg_attr(feature="dev", allow(single_match))]
+impl ChainNotify for EthSync {
-fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
+fn new_blocks(&self,
-match *message {
+imported: Vec<H256>,
-SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted, ref sealed } => {
+invalid: Vec<H256>,
-let mut sync_io = NetSyncIo::new(io, self.chain.deref());
+enacted: Vec<H256>,
-self.sync.unwrapped_write().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted, sealed);
+retracted: Vec<H256>,
-},
+sealed: Vec<H256>)
-_ => {/* Ignore other messages */},
+{
-}
+self.network.with_context(ETH_PROTOCOL, |context| {
+let mut sync_io = NetSyncIo::new(context, self.handler.chain.deref());
+self.handler.sync.unwrapped_write().chain_new_blocks(
+&mut sync_io,
+&imported,
+&invalid,
+&enacted,
+&retracted,
+&sealed);
+});
+}

+fn start(&self) {
+self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e));
+self.network.register_protocol(self.handler.clone(), ETH_PROTOCOL, &[62u8, 63u8])
+.unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e));
+}

+fn stop(&self) {
+self.network.stop().unwrap_or_else(|e| warn!("Error stopping network: {:?}", e));
+}
+}

+/// Trait for managing network
+pub trait ManageNetwork : Send + Sync {
+/// Set mode for reserved peers (allow/deny peers that are unreserved)
+fn set_non_reserved_mode(&self, mode: ::util::network::NonReservedPeerMode);
+/// Remove reservation for the peer
+fn remove_reserved_peer(&self, peer: &str) -> Result<(), String>;
+/// Add reserved peer
+fn add_reserved_peer(&self, peer: &str) -> Result<(), String>;
+/// Start network
+fn start_network(&self);
+/// Stop network
+fn stop_network(&self);
+/// Query the current configuration of the network
+fn network_config(&self) -> NetworkConfiguration;
+}

+impl ManageNetwork for EthSync {
+fn set_non_reserved_mode(&self, mode: ::util::network::NonReservedPeerMode) {
+self.network.set_non_reserved_mode(mode);
+}

+fn remove_reserved_peer(&self, peer: &str) -> Result<(), String> {
+self.network.remove_reserved_peer(peer).map_err(|e| format!("{:?}", e))
+}

+fn add_reserved_peer(&self, peer: &str) -> Result<(), String> {
+self.network.add_reserved_peer(peer).map_err(|e| format!("{:?}", e))
+}

+fn start_network(&self) {
+self.start();
+}

+fn stop_network(&self) {
+self.network.with_context(ETH_PROTOCOL, |context| {
+let mut sync_io = NetSyncIo::new(context, self.handler.chain.deref());
+self.handler.sync.unwrapped_write().abort(&mut sync_io);
+});
+self.stop();
+}

+fn network_config(&self) -> NetworkConfiguration {
+self.network.config().clone()
}
}
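The rewritten `sync/src/lib.rs` inverts ownership: `EthSync` now constructs and owns its `NetworkService`, shares the protocol handler behind an `Arc`, and exposes start/stop through the new `ManageNetwork` trait instead of being registered into an externally created service. A simplified, self-contained sketch of that shape follows; all types are stand-ins and no Parity crates are involved.

```rust
use std::sync::{Arc, RwLock};

// Minimal stand-in for a network service owned by the sync object.
struct NetworkService { running: RwLock<bool> }

impl NetworkService {
    fn new() -> Result<NetworkService, String> {
        Ok(NetworkService { running: RwLock::new(false) })
    }
    fn start(&self) { *self.running.write().unwrap() = true; }
    fn stop(&self) { *self.running.write().unwrap() = false; }
    fn is_running(&self) -> bool { *self.running.read().unwrap() }
}

// Stand-in for SyncProtocolHandler: shared state behind an Arc.
struct SyncProtocolHandler { best_block: RwLock<u64> }

struct EthSync {
    network: NetworkService,
    handler: Arc<SyncProtocolHandler>,
}

impl EthSync {
    // The constructor builds the service itself and can therefore fail,
    // which is why the diff changes the return type to a Result.
    fn new() -> Result<Arc<EthSync>, String> {
        let service = NetworkService::new()?;
        Ok(Arc::new(EthSync {
            network: service,
            handler: Arc::new(SyncProtocolHandler { best_block: RwLock::new(0) }),
        }))
    }
    fn start_network(&self) { self.network.start(); }
    fn stop_network(&self) { self.network.stop(); }
    fn best_block(&self) -> u64 { *self.handler.best_block.read().unwrap() }
}

fn main() {
    let sync = EthSync::new().expect("sketch constructor cannot fail");
    sync.start_network();
    assert!(sync.network.is_running());
    assert_eq!(sync.best_block(), 0);
    sync.stop_network();
}
```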
@@ -27,7 +27,7 @@ itertools = "0.4"
crossbeam = "0.2"
slab = "0.2"
sha3 = { path = "sha3" }
-clippy = { version = "0.0.78", optional = true}
+clippy = { version = "0.0.79", optional = true}
igd = "0.5.0"
ethcore-devtools = { path = "../devtools" }
libc = "0.2.7"

@@ -242,7 +242,7 @@ pub enum FromBytesError {
}

/// Value that can be serialized from bytes array
-pub trait FromRawBytes : Sized {
+pub trait FromRawBytes: Sized {
/// function that will instantiate and initialize object from slice
fn from_bytes(d: &[u8]) -> Result<Self, FromBytesError>;
}
@@ -255,7 +255,7 @@ impl<T> FromRawBytes for T where T: FixedHash {
Ordering::Equal => ()
};

-let mut res: Self = unsafe { mem::uninitialized() };
+let mut res = T::zero();
res.copy_raw(bytes);
Ok(res)
}
@@ -271,7 +271,7 @@ macro_rules! sized_binary_map {
::std::cmp::Ordering::Greater => return Err(FromBytesError::TooLong),
::std::cmp::Ordering::Equal => ()
};
-let mut res: Self = unsafe { ::std::mem::uninitialized() };
+let mut res: Self = 0;
res.copy_raw(bytes);
Ok(res)
}
@@ -298,7 +298,7 @@ sized_binary_map!(u32);
sized_binary_map!(u64);

/// Value that can be serialized from variable-length byte array
-pub trait FromRawBytesVariable : Sized {
+pub trait FromRawBytesVariable: Sized {
/// Create value from slice
fn from_bytes_variable(bytes: &[u8]) -> Result<Self, FromBytesError>;
}
@@ -326,7 +326,7 @@ impl<T> FromRawBytesVariable for Vec<T> where T: FromRawBytes {
let size_of_t = mem::size_of::<T>();
let length_in_chunks = bytes.len() / size_of_t;

-let mut result = Vec::with_capacity(length_in_chunks );
+let mut result = Vec::with_capacity(length_in_chunks);
unsafe { result.set_len(length_in_chunks) };
for i in 0..length_in_chunks {
*result.get_mut(i).unwrap() = try!(T::from_bytes(
@@ -339,7 +339,7 @@ impl<T> FromRawBytesVariable for Vec<T> where T: FromRawBytes {
impl<V1, T2> FromRawBytes for (V1, T2) where V1: FromRawBytesVariable, T2: FromRawBytes {
fn from_bytes(bytes: &[u8]) -> Result<Self, FromBytesError> {
let header = 8usize;
-let mut map: (u64, ) = unsafe { mem::uninitialized() };
+let mut map: (u64, ) = (0,);

if bytes.len() < header { return Err(FromBytesError::NotLongEnough); }
map.copy_raw(&bytes[0..header]);
@@ -358,7 +358,7 @@ impl<V1, V2, T3> FromRawBytes for (V1, V2, T3)
{
fn from_bytes(bytes: &[u8]) -> Result<Self, FromBytesError> {
let header = 16usize;
-let mut map: (u64, u64, ) = unsafe { mem::uninitialized() };
+let mut map: (u64, u64, ) = (0, 0,);

if bytes.len() < header { return Err(FromBytesError::NotLongEnough); }
map.copy_raw(&bytes[0..header]);
@@ -373,7 +373,7 @@ impl<V1, V2, T3> FromRawBytes for (V1, V2, T3)
}
}

-impl<'a, V1, T2> ToBytesWithMap for (&'a Vec<V1>, &'a T2) where V1: ToBytesWithMap, T2: ToBytesWithMap {
+impl<'a, V1, X1, T2> ToBytesWithMap for (X1, &'a T2) where V1: ToBytesWithMap, X1: Deref<Target=[V1]>, T2: ToBytesWithMap {
fn to_bytes_map(&self) -> Vec<u8> {
let header = 8usize;
let v1_size = mem::size_of::<V1>();
@@ -390,9 +390,9 @@ impl<'a, V1, T2> ToBytesWithMap for (&'a Vec<V1>, &'a T2) where V1: ToBytesWithM

}

-impl<'a, V1, V2, T3> ToBytesWithMap for (&'a Vec<V1>, &'a Vec<V2>, &'a T3)
+impl<'a, V1, X1, V2, X2, T3> ToBytesWithMap for (X1, X2, &'a T3)
-where V1: ToBytesWithMap,
+where V1: ToBytesWithMap, X1: Deref<Target=[V1]>,
-V2: ToBytesWithMap,
+V2: ToBytesWithMap, X2: Deref<Target=[V2]>,
T3: ToBytesWithMap
{
fn to_bytes_map(&self) -> Vec<u8> {
@@ -433,7 +433,7 @@ pub trait ToBytesWithMap {

impl<T> ToBytesWithMap for T where T: FixedHash {
fn to_bytes_map(&self) -> Vec<u8> {
-self.as_slice().to_vec()
+self.as_slice().to_owned()
}
}

@@ -493,7 +493,7 @@ fn populate_big_types() {
fn raw_bytes_from_tuple() {
type Tup = (Vec<u16>, u16);

-let tup = (vec![1u16, 1u16, 1u16, 1u16], 10u16);
+let tup: (&[u16], u16) = (&[1; 4], 10);
let bytes = vec![
// map
8u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
@@ -505,18 +505,19 @@ fn raw_bytes_from_tuple() {
// 10u16
10u8, 0u8];

-let tup_from = Tup::from_bytes(&bytes).unwrap();
+let (v, x) = Tup::from_bytes(&bytes).unwrap();
-assert_eq!(tup, tup_from);
+assert_eq!(tup, (&v[..], x));
+let tup_from = (v, x);

-let tup_to = (&tup_from.0, &tup_from.1);
+let tup_to = (tup_from.0, &tup_from.1);
let bytes_to = tup_to.to_bytes_map();
assert_eq!(bytes_to, bytes);
}

#[test]
fn bytes_map_from_triple() {
-let data = (vec![2u16; 6], vec![6u32; 3], 12u64);
+let data: (&[u16], &[u32], u64) = (&[2; 6], &[6; 3], 12u64);
-let bytes_map = (&data.0, &data.1, &data.2).to_bytes_map();
+let bytes_map = (data.0, data.1, &data.2).to_bytes_map();
assert_eq!(bytes_map, vec![
// data map 2 x u64
12, 0, 0, 0, 0, 0, 0, 0,
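Several of the hunks above replace `mem::uninitialized()` with well-defined zero values that `copy_raw` then overwrites. A standalone illustration of the safer pattern, using plain zero-initialization and `from_le_bytes` (little-endian framing is assumed here to match the test vectors above):

```rust
/// Decode a u64 from an 8-byte little-endian buffer without any unsafe
/// uninitialized memory: start from zeros, then overwrite.
fn u64_from_bytes(bytes: &[u8]) -> Result<u64, &'static str> {
    if bytes.len() != 8 {
        return Err("bad input size");
    }
    let mut buf = [0u8; 8]; // zero-initialized, mirroring `let mut res: Self = 0;`
    buf.copy_from_slice(bytes);
    Ok(u64::from_le_bytes(buf))
}

fn main() {
    let bytes = [12, 0, 0, 0, 0, 0, 0, 0];
    assert_eq!(u64_from_bytes(&bytes), Ok(12));
}
```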
@@ -65,6 +65,8 @@ pub enum UtilError {
SimpleString(String),
/// Error from a bad input size being given for the needed output.
BadSize,
+/// Error from snappy.
+Snappy(::snappy::InvalidInput),
}

impl fmt::Display for UtilError {
@@ -82,6 +84,7 @@ impl fmt::Display for UtilError {
UtilError::Decoder(ref err) => f.write_fmt(format_args!("{}", err)),
UtilError::SimpleString(ref msg) => f.write_str(&msg),
UtilError::BadSize => f.write_str("Bad input size."),
+UtilError::Snappy(ref err) => f.write_fmt(format_args!("{}", err)),
}
}
}
@@ -179,6 +182,12 @@ impl From<String> for UtilError {
}
}

+impl From<::snappy::InvalidInput> for UtilError {
+fn from(err: ::snappy::InvalidInput) -> UtilError {
+UtilError::Snappy(err)
+}
+}

// TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted.
/*#![feature(concat_idents)]
macro_rules! assimilate {
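The new `Snappy` variant together with the `From<::snappy::InvalidInput>` impl lets snappy failures propagate through `try!` (today `?`) as a `UtilError`. A self-contained sketch of that conversion pattern with stand-in types:

```rust
#[derive(Debug)]
struct InvalidInput; // stand-in for ::snappy::InvalidInput

#[derive(Debug)]
enum UtilError {
    Snappy(InvalidInput),
}

impl From<InvalidInput> for UtilError {
    fn from(err: InvalidInput) -> UtilError {
        UtilError::Snappy(err)
    }
}

fn decompress() -> Result<Vec<u8>, InvalidInput> {
    Err(InvalidInput) // pretend the input was corrupt
}

fn load_state() -> Result<Vec<u8>, UtilError> {
    // `?` applies the From impl automatically, just as `try!` did in 2016 Rust.
    let data = decompress()?;
    Ok(data)
}

fn main() {
    assert!(matches!(load_state(), Err(UtilError::Snappy(_))));
}
```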
@@ -202,7 +202,7 @@ impl JournalDB for ArchiveDB {
fn latest_era(&self) -> Option<u64> { self.latest_era }

fn state(&self, id: &H256) -> Option<Bytes> {
-self.backing.get_by_prefix(&id[0..12]).and_then(|b| Some(b.to_vec()))
+self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
}

fn is_pruned(&self) -> bool { false }

@@ -339,6 +339,10 @@ impl JournalDB for EarlyMergeDB {
}
}

+fn state(&self, id: &H256) -> Option<Bytes> {
+self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
+}

#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:

@@ -57,7 +57,7 @@ pub enum Algorithm {
}

impl Default for Algorithm {
-fn default() -> Algorithm { Algorithm::Archive }
+fn default() -> Algorithm { Algorithm::OverlayRecent }
}

impl fmt::Display for Algorithm {

@@ -171,7 +171,7 @@ impl OverlayRecentDB {
for r in insertions.iter() {
let k: H256 = r.val_at(0);
let v: Bytes = r.val_at(1);
-overlay.emplace(k.clone(), v);
+overlay.emplace(OverlayRecentDB::to_short_key(&k), v);
inserted_keys.push(k);
count += 1;
}
@@ -191,6 +191,13 @@ impl OverlayRecentDB {
trace!("Recovered {} overlay entries, {} journal entries", count, journal.len());
JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era }
}

+#[inline]
+fn to_short_key(key: &H256) -> H256 {
+let mut k = H256::new();
+&mut k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
+k
+}
}

impl JournalDB for OverlayRecentDB {
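`OverlayRecentDB::to_short_key` keys the in-memory overlay by a fixed prefix of the hash, so only the first `DB_PREFIX_LEN` bytes are significant. A standalone sketch of the truncation (assuming `DB_PREFIX_LEN` is 12, as the `&id[0..12]` it replaces suggests):

```rust
const DB_PREFIX_LEN: usize = 12; // assumed from the `&id[0..12]` replaced above

/// Zero-pad a 32-byte key down to its significant prefix, as to_short_key does.
fn to_short_key(key: &[u8; 32]) -> [u8; 32] {
    let mut k = [0u8; 32];
    k[0..DB_PREFIX_LEN].copy_from_slice(&key[0..DB_PREFIX_LEN]);
    k
}

fn main() {
    let mut key = [0u8; 32];
    for (i, b) in key.iter_mut().enumerate() {
        *b = i as u8;
    }
    let short = to_short_key(&key);
    // Prefix is preserved, the tail is zeroed.
    assert_eq!(&short[..DB_PREFIX_LEN], &key[..DB_PREFIX_LEN]);
    assert!(short[DB_PREFIX_LEN..].iter().all(|&b| b == 0));
}
```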
@@ -212,6 +219,11 @@ impl JournalDB for OverlayRecentDB {

fn latest_era(&self) -> Option<u64> { self.journal_overlay.unwrapped_read().latest_era }

+fn state(&self, key: &H256) -> Option<Bytes> {
+let v = self.journal_overlay.unwrapped_read().backing_overlay.get(&OverlayRecentDB::to_short_key(key)).map(|v| v.to_vec());
+v.or_else(|| self.backing.get_by_prefix(&key[0..DB_PREFIX_LEN]).map(|b| b.to_vec()))
+}

fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// record new commit's details.
trace!("commit: #{} ({}), end era: {:?}", now, id, end);
@@ -230,7 +242,7 @@ impl JournalDB for OverlayRecentDB {
r.begin_list(2);
r.append(&k);
r.append(&v);
-journal_overlay.backing_overlay.emplace(k, v);
+journal_overlay.backing_overlay.emplace(OverlayRecentDB::to_short_key(&k), v);
}
r.append(&removed_keys);

@@ -266,7 +278,7 @@ impl JournalDB for OverlayRecentDB {
{
if canon_id == journal.id {
for h in &journal.insertions {
-if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) {
+if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(&OverlayRecentDB::to_short_key(h)) {
if rc > 0 {
canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy
}
@@ -284,11 +296,11 @@ impl JournalDB for OverlayRecentDB {
}
// update the overlay
for k in overlay_deletions {
-journal_overlay.backing_overlay.remove(&k);
+journal_overlay.backing_overlay.remove(&OverlayRecentDB::to_short_key(&k));
}
// apply canon deletions
for k in canon_deletions {
-if !journal_overlay.backing_overlay.contains(&k) {
+if !journal_overlay.backing_overlay.contains(&OverlayRecentDB::to_short_key(&k)) {
try!(batch.delete(&k));
}
}
@@ -322,7 +334,7 @@ impl HashDB for OverlayRecentDB {
match k {
Some(&(ref d, rc)) if rc > 0 => Some(d),
_ => {
-let v = self.journal_overlay.unwrapped_read().backing_overlay.get(key).map(|v| v.to_vec());
+let v = self.journal_overlay.unwrapped_read().backing_overlay.get(&OverlayRecentDB::to_short_key(key)).map(|v| v.to_vec());
match v {
Some(x) => {
Some(&self.transaction_overlay.denote(key, x).0)

@@ -111,6 +111,10 @@ impl JournalDB for RefCountedDB {

fn latest_era(&self) -> Option<u64> { self.latest_era }

+fn state(&self, id: &H256) -> Option<Bytes> {
+self.backing.get_by_prefix(&id[0..DB_PREFIX_LEN]).map(|b| b.to_vec())
+}

fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]

@@ -39,9 +39,7 @@ pub trait JournalDB : HashDB + Send + Sync {
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError>;

/// State data query
-fn state(&self, _id: &H256) -> Option<Bytes> {
+fn state(&self, _id: &H256) -> Option<Bytes>;
-None
-}

/// Whether this database is pruned.
fn is_pruned(&self) -> bool { true }

@@ -153,6 +153,7 @@ pub mod log;
pub mod panics;
pub mod network_settings;
pub mod path;
+pub mod snappy;
mod timer;

pub use common::*;

@@ -136,11 +136,11 @@ pub type ProtocolId = &'static str;

/// Messages used to communitate with the event loop from other threads.
#[derive(Clone)]
-pub enum NetworkIoMessage<Message> where Message: Send + Sync + Clone {
+pub enum NetworkIoMessage {
/// Register a new protocol handler.
AddHandler {
/// Handler shared instance.
-handler: Arc<NetworkProtocolHandler<Message> + Sync>,
+handler: Arc<NetworkProtocolHandler + Sync>,
/// Protocol Id.
protocol: ProtocolId,
/// Supported protocol versions.
@@ -163,8 +163,6 @@ pub enum NetworkIoMessage<Message> where Message: Send + Sync + Clone {
DisablePeer(PeerId),
/// Network has been started with the host as the given enode.
NetworkStarted(String),
-/// User message
-User(Message),
}

/// Local (temporary) peer session ID.
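The hunk above removes the user-supplied `Message` type parameter and its `User(Message)` variant, so `NetworkIoMessage` becomes a plain enum and subsystem-specific notifications travel through dedicated traits (such as `ChainNotify` earlier in this commit) rather than a generic user payload. A tiny sketch of the resulting shape; the variants shown are illustrative stand-ins for the full set, with `PeerId` approximated as `usize`:

```rust
// Concrete message enum with no generic user payload.
enum NetworkIoMessage {
    DisablePeer(usize),
    NetworkStarted(String),
}

fn describe(msg: &NetworkIoMessage) -> String {
    match msg {
        NetworkIoMessage::DisablePeer(peer) => format!("disable peer {}", peer),
        NetworkIoMessage::NetworkStarted(enode) => format!("network started as {}", enode),
    }
}

fn main() {
    println!("{}", describe(&NetworkIoMessage::DisablePeer(3)));
    println!("{}", describe(&NetworkIoMessage::NetworkStarted("enode://...".into())));
}
```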
@@ -188,8 +186,8 @@ impl Encodable for CapabilityInfo {
}

/// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
-pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, 's {
+pub struct NetworkContext<'s> {
-io: &'s IoContext<NetworkIoMessage<Message>>,
+io: &'s IoContext<NetworkIoMessage>,
protocol: ProtocolId,
sessions: Arc<RwLock<Slab<SharedSession>>>,
session: Option<SharedSession>,
@@ -197,12 +195,12 @@ pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'sta
_reserved_peers: &'s HashSet<NodeId>,
}

-impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, {
+impl<'s> NetworkContext<'s> {
/// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
-fn new(io: &'s IoContext<NetworkIoMessage<Message>>,
+fn new(io: &'s IoContext<NetworkIoMessage>,
protocol: ProtocolId,
session: Option<SharedSession>, sessions: Arc<RwLock<Slab<SharedSession>>>,
-reserved_peers: &'s HashSet<NodeId>) -> NetworkContext<'s, Message> {
+reserved_peers: &'s HashSet<NodeId>) -> NetworkContext<'s> {
let id = session.as_ref().map(|s| s.locked().token());
NetworkContext {
io: io,
@@ -238,13 +236,8 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone
self.send(self.session_id.unwrap(), packet_id, data)
}

-/// Send an IO message
-pub fn message(&self, msg: Message) -> Result<(), UtilError> {
-self.io.message(NetworkIoMessage::User(msg))
-}

/// Get an IoChannel.
-pub fn io_channel(&self) -> IoChannel<NetworkIoMessage<Message>> {
+pub fn io_channel(&self) -> IoChannel<NetworkIoMessage> {
self.io.channel()
}

@@ -333,13 +326,13 @@ struct ProtocolTimer {
}

/// Root IO handler. Manages protocol handlers, IO timers and network connections.
-pub struct Host<Message> where Message: Send + Sync + Clone {
+pub struct Host {
pub info: RwLock<HostInfo>,
tcp_listener: Mutex<TcpListener>,
sessions: Arc<RwLock<Slab<SharedSession>>>,
discovery: Mutex<Option<Discovery>>,
nodes: RwLock<NodeTable>,
-handlers: RwLock<HashMap<ProtocolId, Arc<NetworkProtocolHandler<Message>>>>,
+handlers: RwLock<HashMap<ProtocolId, Arc<NetworkProtocolHandler>>>,
timers: RwLock<HashMap<TimerToken, ProtocolTimer>>,
timer_counter: RwLock<usize>,
stats: Arc<NetworkStats>,
@@ -348,9 +341,9 @@ pub struct Host<Message> where Message: Send + Sync + Clone {
stopping: AtomicBool,
}

-impl<Message> Host<Message> where Message: Send + Sync + Clone {
+impl Host {
/// Create a new instance
-pub fn new(config: NetworkConfiguration, stats: Arc<NetworkStats>) -> Result<Host<Message>, UtilError> {
+pub fn new(config: NetworkConfiguration, stats: Arc<NetworkStats>) -> Result<Host, UtilError> {
trace!(target: "host", "Creating new Host object");

let mut listen_address = match config.listen_address {
@@ -381,7 +374,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
let boot_nodes = config.boot_nodes.clone();
let reserved_nodes = config.reserved_nodes.clone();

-let mut host = Host::<Message> {
+let mut host = Host {
info: RwLock::new(HostInfo {
keys: keys,
config: config,
@@ -444,7 +437,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
Ok(())
}

-pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode, io: &IoContext<NetworkIoMessage<Message>>) {
+pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode, io: &IoContext<NetworkIoMessage>) {
let mut info = self.info.unwrapped_write();

if info.config.non_reserved_mode != mode {
@@ -495,7 +488,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
r
}

-pub fn stop(&self, io: &IoContext<NetworkIoMessage<Message>>) -> Result<(), UtilError> {
+pub fn stop(&self, io: &IoContext<NetworkIoMessage>) -> Result<(), UtilError> {
self.stopping.store(true, AtomicOrdering::Release);
let mut to_kill = Vec::new();
for e in self.sessions.unwrapped_write().iter_mut() {
@@ -511,7 +504,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
Ok(())
}

-fn init_public_interface(&self, io: &IoContext<NetworkIoMessage<Message>>) -> Result<(), UtilError> {
+fn init_public_interface(&self, io: &IoContext<NetworkIoMessage>) -> Result<(), UtilError> {
if self.info.unwrapped_read().public_endpoint.is_some() {
return Ok(());
}
@@ -567,7 +560,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
Ok(())
}

-fn maintain_network(&self, io: &IoContext<NetworkIoMessage<Message>>) {
+fn maintain_network(&self, io: &IoContext<NetworkIoMessage>) {
self.keep_alive(io);
self.connect_peers(io);
}
@@ -588,7 +581,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
self.sessions.unwrapped_read().count() - self.session_count()
}

-fn keep_alive(&self, io: &IoContext<NetworkIoMessage<Message>>) {
+fn keep_alive(&self, io: &IoContext<NetworkIoMessage>) {
let mut to_kill = Vec::new();
for e in self.sessions.unwrapped_write().iter_mut() {
let mut s = e.locked();
@@ -603,7 +596,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
}
}

-fn connect_peers(&self, io: &IoContext<NetworkIoMessage<Message>>) {
+fn connect_peers(&self, io: &IoContext<NetworkIoMessage>) {
let (ideal_peers, mut pin) = {
let info = self.info.unwrapped_read();
if info.capabilities.is_empty() {
@@ -651,7 +644,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
}

#[cfg_attr(feature="dev", allow(single_match))]
-fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
+fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage>) {
if self.have_session(id)
{
trace!(target: "network", "Aborted connect. Node already connected.");
@@ -688,9 +681,10 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
}

#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
-fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) -> Result<(), UtilError> {
+fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage>) -> Result<(), UtilError> {
let nonce = self.info.unwrapped_write().next_nonce();
let mut sessions = self.sessions.unwrapped_write();

let token = sessions.insert_with_opt(|token| {
match Session::new(io, socket, token, id, &nonce, self.stats.clone(), &self.info.unwrapped_read()) {
Ok(s) => Some(Arc::new(Mutex::new(s))),
@@ -710,7 +704,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
}
}

-fn accept(&self, io: &IoContext<NetworkIoMessage<Message>>) {
+fn accept(&self, io: &IoContext<NetworkIoMessage>) {
trace!(target: "network", "Accepting incoming connection");
loop {
let socket = match self.tcp_listener.locked().accept() {
@@ -727,8 +721,9 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
}
}

-fn session_writable(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
+fn session_writable(&self, token: StreamToken, io: &IoContext<NetworkIoMessage>) {
let session = { self.sessions.unwrapped_read().get(token).cloned() };

if let Some(session) = session {
let mut s = session.locked();
if let Err(e) = s.writable(io, &self.info.unwrapped_read()) {
@@ -740,13 +735,13 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
}
}

-fn connection_closed(&self, token: TimerToken, io: &IoContext<NetworkIoMessage<Message>>) {
+fn connection_closed(&self, token: TimerToken, io: &IoContext<NetworkIoMessage>) {
trace!(target: "network", "Connection closed: {}", token);
|
||||||
self.kill_connection(token, io, true);
|
self.kill_connection(token, io, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(feature="dev", allow(collapsible_if))]
|
#[cfg_attr(feature="dev", allow(collapsible_if))]
|
||||||
fn session_readable(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
|
fn session_readable(&self, token: StreamToken, io: &IoContext<NetworkIoMessage>) {
|
||||||
let mut ready_data: Vec<ProtocolId> = Vec::new();
|
let mut ready_data: Vec<ProtocolId> = Vec::new();
|
||||||
let mut packet_data: Vec<(ProtocolId, PacketId, Vec<u8>)> = Vec::new();
|
let mut packet_data: Vec<(ProtocolId, PacketId, Vec<u8>)> = Vec::new();
|
||||||
let mut kill = false;
|
let mut kill = false;
|
||||||
@ -831,12 +826,12 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn connection_timeout(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
|
fn connection_timeout(&self, token: StreamToken, io: &IoContext<NetworkIoMessage>) {
|
||||||
trace!(target: "network", "Connection timeout: {}", token);
|
trace!(target: "network", "Connection timeout: {}", token);
|
||||||
self.kill_connection(token, io, true)
|
self.kill_connection(token, io, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn kill_connection(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>, remote: bool) {
|
fn kill_connection(&self, token: StreamToken, io: &IoContext<NetworkIoMessage>, remote: bool) {
|
||||||
let mut to_disconnect: Vec<ProtocolId> = Vec::new();
|
let mut to_disconnect: Vec<ProtocolId> = Vec::new();
|
||||||
let mut failure_id = None;
|
let mut failure_id = None;
|
||||||
let mut deregister = false;
|
let mut deregister = false;
|
||||||
@ -876,7 +871,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn update_nodes(&self, io: &IoContext<NetworkIoMessage<Message>>, node_changes: TableUpdates) {
|
fn update_nodes(&self, io: &IoContext<NetworkIoMessage>, node_changes: TableUpdates) {
|
||||||
let mut to_remove: Vec<PeerId> = Vec::new();
|
let mut to_remove: Vec<PeerId> = Vec::new();
|
||||||
{
|
{
|
||||||
let sessions = self.sessions.unwrapped_write();
|
let sessions = self.sessions.unwrapped_write();
|
||||||
@ -895,17 +890,24 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
|
|||||||
}
|
}
|
||||||
self.nodes.unwrapped_write().update(node_changes);
|
self.nodes.unwrapped_write().update(node_changes);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn with_context<F>(&self, protocol: ProtocolId, io: &IoContext<NetworkIoMessage>, action: F) where F: Fn(&NetworkContext) {
|
||||||
|
let reserved = { self.reserved_nodes.unwrapped_read() };
|
||||||
|
|
||||||
|
let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved);
|
||||||
|
action(&context);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Message: Send + Sync + Clone + 'static {
|
impl IoHandler<NetworkIoMessage> for Host {
|
||||||
/// Initialize networking
|
/// Initialize networking
|
||||||
fn initialize(&self, io: &IoContext<NetworkIoMessage<Message>>) {
|
fn initialize(&self, io: &IoContext<NetworkIoMessage>) {
|
||||||
io.register_timer(IDLE, MAINTENANCE_TIMEOUT).expect("Error registering Network idle timer");
|
io.register_timer(IDLE, MAINTENANCE_TIMEOUT).expect("Error registering Network idle timer");
|
||||||
io.message(NetworkIoMessage::InitPublicInterface).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
|
io.message(NetworkIoMessage::InitPublicInterface).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
|
||||||
self.maintain_network(io)
|
self.maintain_network(io)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn stream_hup(&self, io: &IoContext<NetworkIoMessage<Message>>, stream: StreamToken) {
|
fn stream_hup(&self, io: &IoContext<NetworkIoMessage>, stream: StreamToken) {
|
||||||
trace!(target: "network", "Hup: {}", stream);
|
trace!(target: "network", "Hup: {}", stream);
|
||||||
match stream {
|
match stream {
|
||||||
FIRST_SESSION ... LAST_SESSION => self.connection_closed(stream, io),
|
FIRST_SESSION ... LAST_SESSION => self.connection_closed(stream, io),
|
||||||
@ -913,7 +915,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn stream_readable(&self, io: &IoContext<NetworkIoMessage<Message>>, stream: StreamToken) {
|
fn stream_readable(&self, io: &IoContext<NetworkIoMessage>, stream: StreamToken) {
|
||||||
if self.stopping.load(AtomicOrdering::Acquire) {
|
if self.stopping.load(AtomicOrdering::Acquire) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -930,7 +932,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn stream_writable(&self, io: &IoContext<NetworkIoMessage<Message>>, stream: StreamToken) {
|
fn stream_writable(&self, io: &IoContext<NetworkIoMessage>, stream: StreamToken) {
|
||||||
if self.stopping.load(AtomicOrdering::Acquire) {
|
if self.stopping.load(AtomicOrdering::Acquire) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -943,7 +945,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn timeout(&self, io: &IoContext<NetworkIoMessage<Message>>, token: TimerToken) {
|
fn timeout(&self, io: &IoContext<NetworkIoMessage>, token: TimerToken) {
|
||||||
if self.stopping.load(AtomicOrdering::Acquire) {
|
if self.stopping.load(AtomicOrdering::Acquire) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -978,7 +980,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn message(&self, io: &IoContext<NetworkIoMessage<Message>>, message: &NetworkIoMessage<Message>) {
|
fn message(&self, io: &IoContext<NetworkIoMessage>, message: &NetworkIoMessage) {
|
||||||
if self.stopping.load(AtomicOrdering::Acquire) {
|
if self.stopping.load(AtomicOrdering::Acquire) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -1031,19 +1033,13 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
trace!(target: "network", "Disabling peer {}", peer);
|
trace!(target: "network", "Disabling peer {}", peer);
|
||||||
self.kill_connection(*peer, io, false);
|
self.kill_connection(*peer, io, false);
|
||||||
},
|
},
|
||||||
NetworkIoMessage::User(ref message) => {
|
|
||||||
let reserved = self.reserved_nodes.unwrapped_read();
|
|
||||||
for (p, h) in self.handlers.unwrapped_read().iter() {
|
|
||||||
h.message(&NetworkContext::new(io, p, None, self.sessions.clone(), &reserved), &message);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
NetworkIoMessage::InitPublicInterface =>
|
NetworkIoMessage::InitPublicInterface =>
|
||||||
self.init_public_interface(io).unwrap_or_else(|e| warn!("Error initializing public interface: {:?}", e)),
|
self.init_public_interface(io).unwrap_or_else(|e| warn!("Error initializing public interface: {:?}", e)),
|
||||||
_ => {} // ignore others.
|
_ => {} // ignore others.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn register_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop<IoManager<NetworkIoMessage<Message>>>) {
|
fn register_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop<IoManager<NetworkIoMessage>>) {
|
||||||
match stream {
|
match stream {
|
||||||
FIRST_SESSION ... LAST_SESSION => {
|
FIRST_SESSION ... LAST_SESSION => {
|
||||||
let session = { self.sessions.unwrapped_read().get(stream).cloned() };
|
let session = { self.sessions.unwrapped_read().get(stream).cloned() };
|
||||||
@ -1057,7 +1053,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn deregister_stream(&self, stream: StreamToken, event_loop: &mut EventLoop<IoManager<NetworkIoMessage<Message>>>) {
|
fn deregister_stream(&self, stream: StreamToken, event_loop: &mut EventLoop<IoManager<NetworkIoMessage>>) {
|
||||||
match stream {
|
match stream {
|
||||||
FIRST_SESSION ... LAST_SESSION => {
|
FIRST_SESSION ... LAST_SESSION => {
|
||||||
let mut connections = self.sessions.unwrapped_write();
|
let mut connections = self.sessions.unwrapped_write();
|
||||||
@ -1071,7 +1067,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop<IoManager<NetworkIoMessage<Message>>>) {
|
fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop<IoManager<NetworkIoMessage>>) {
|
||||||
match stream {
|
match stream {
|
||||||
FIRST_SESSION ... LAST_SESSION => {
|
FIRST_SESSION ... LAST_SESSION => {
|
||||||
let connection = { self.sessions.unwrapped_read().get(stream).cloned() };
|
let connection = { self.sessions.unwrapped_read().get(stream).cloned() };
|
||||||
@ -1152,6 +1148,6 @@ fn host_client_url() {
|
|||||||
let mut config = NetworkConfiguration::new();
|
let mut config = NetworkConfiguration::new();
|
||||||
let key = h256_from_hex("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2");
|
let key = h256_from_hex("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2");
|
||||||
config.use_secret = Some(key);
|
config.use_secret = Some(key);
|
||||||
let host: Host<u32> = Host::new(config, Arc::new(NetworkStats::new())).unwrap();
|
let host: Host = Host::new(config, Arc::new(NetworkStats::new())).unwrap();
|
||||||
assert!(host.local_url().starts_with("enode://101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c@"));
|
assert!(host.local_url().starts_with("enode://101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c@"));
|
||||||
}
|
}
|
||||||
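The hunks above remove the `Message` type parameter from `Host` and its `IoHandler` implementation, delete the `NetworkIoMessage::User` broadcast arm, and add `Host::with_context` as the new way to run protocol code against the network outside a handler callback. A minimal sketch of how module-internal code might call it, assuming a registered protocol id such as "test"; the helper name and the closure body are illustrative and not part of this commit:

// Hypothetical helper (not in this diff): run a closure inside a NetworkContext
// for one protocol, using the new non-generic types from the hunks above.
fn run_in_context(host: &Host, io: &IoContext<NetworkIoMessage>) {
	host.with_context("test", io, |_ctx: &NetworkContext| {
		// Protocol code goes here; the context exposes the same session APIs
		// that handler callbacks receive.
	});
}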
@@ -24,39 +24,30 @@
//!
//! struct MyHandler;
//!
-//! #[derive(Clone)]
-//! struct MyMessage {
-//! 	data: u32
-//! }
-//!
-//! impl NetworkProtocolHandler<MyMessage> for MyHandler {
-//! 	fn initialize(&self, io: &NetworkContext<MyMessage>) {
+//! impl NetworkProtocolHandler for MyHandler {
+//! 	fn initialize(&self, io: &NetworkContext) {
//! 		io.register_timer(0, 1000);
//! 	}
//!
-//! 	fn read(&self, io: &NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
+//! 	fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
//! 		println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer);
//! 	}
//!
-//! 	fn connected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) {
+//! 	fn connected(&self, io: &NetworkContext, peer: &PeerId) {
//! 		println!("Connected {}", peer);
//! 	}
//!
-//! 	fn disconnected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) {
+//! 	fn disconnected(&self, io: &NetworkContext, peer: &PeerId) {
//! 		println!("Disconnected {}", peer);
//! 	}
//!
-//! 	fn timeout(&self, io: &NetworkContext<MyMessage>, timer: TimerToken) {
+//! 	fn timeout(&self, io: &NetworkContext, timer: TimerToken) {
//! 		println!("Timeout {}", timer);
//! 	}
-//!
-//! 	fn message(&self, io: &NetworkContext<MyMessage>, message: &MyMessage) {
-//! 		println!("Message {}", message.data);
-//! 	}
//! }
//!
//! fn main () {
-//! 	let mut service = NetworkService::<MyMessage>::new(NetworkConfiguration::new_local()).expect("Error creating network service");
+//! 	let mut service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service");
//! 	service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]);
//! 	service.start().expect("Error starting service");
//!

@@ -84,7 +75,6 @@ pub use network::host::PacketId;
pub use network::host::NetworkContext;
pub use network::service::NetworkService;
pub use network::host::NetworkIoMessage;
-pub use network::host::NetworkIoMessage::User as UserMessage;
pub use network::error::NetworkError;
pub use network::host::NetworkConfiguration;
pub use network::stats::NetworkStats;

@@ -97,19 +87,17 @@ const PROTOCOL_VERSION: u32 = 4;
/// Network IO protocol handler. This needs to be implemented for each new subprotocol.
/// All the handler function are called from within IO event loop.
/// `Message` is the type for message data.
-pub trait NetworkProtocolHandler<Message>: Sync + Send where Message: Send + Sync + Clone {
+pub trait NetworkProtocolHandler: Sync + Send {
	/// Initialize the handler
-	fn initialize(&self, _io: &NetworkContext<Message>) {}
+	fn initialize(&self, _io: &NetworkContext) {}
	/// Called when new network packet received.
-	fn read(&self, io: &NetworkContext<Message>, peer: &PeerId, packet_id: u8, data: &[u8]);
+	fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]);
	/// Called when new peer is connected. Only called when peer supports the same protocol.
-	fn connected(&self, io: &NetworkContext<Message>, peer: &PeerId);
+	fn connected(&self, io: &NetworkContext, peer: &PeerId);
	/// Called when a previously connected peer disconnects.
-	fn disconnected(&self, io: &NetworkContext<Message>, peer: &PeerId);
+	fn disconnected(&self, io: &NetworkContext, peer: &PeerId);
	/// Timer function called after a timeout created with `NetworkContext::timeout`.
-	fn timeout(&self, _io: &NetworkContext<Message>, _timer: TimerToken) {}
-	/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
-	fn message(&self, _io: &NetworkContext<Message>, _message: &Message) {}
+	fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {}
}

/// Non-reserved peer modes.

@@ -130,4 +118,4 @@ impl NonReservedPeerMode {
			_ => None,
		}
	}
}
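With the `Message` parameter gone, implementors of `NetworkProtocolHandler` no longer provide a `message` callback; only the peer and timer hooks shown in the trait above remain. A minimal no-op implementation under the new trait might look like this (the `Noop` type is illustrative, not part of the commit):

// Sketch of the smallest possible handler after this change; `initialize` and
// `timeout` keep their default empty bodies from the trait.
struct Noop;

impl NetworkProtocolHandler for Noop {
	fn read(&self, _io: &NetworkContext, _peer: &PeerId, _packet_id: u8, _data: &[u8]) {}
	fn connected(&self, _io: &NetworkContext, _peer: &PeerId) {}
	fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) {}
}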
@@ -20,30 +20,30 @@ use panics::*;
use misc::RwLockable;
use network::{NetworkProtocolHandler, NetworkConfiguration};
use network::error::NetworkError;
-use network::host::{Host, NetworkIoMessage, ProtocolId};
+use network::host::{Host, NetworkContext, NetworkIoMessage, ProtocolId};
use network::stats::NetworkStats;
use io::*;

/// IO Service with networking
/// `Message` defines a notification data type.
-pub struct NetworkService<Message> where Message: Send + Sync + Clone + 'static {
+pub struct NetworkService {
-	io_service: IoService<NetworkIoMessage<Message>>,
+	io_service: IoService<NetworkIoMessage>,
	host_info: String,
-	host: RwLock<Option<Arc<Host<Message>>>>,
+	host: RwLock<Option<Arc<Host>>>,
	stats: Arc<NetworkStats>,
	panic_handler: Arc<PanicHandler>,
	config: NetworkConfiguration,
}

-impl<Message> NetworkService<Message> where Message: Send + Sync + Clone + 'static {
+impl NetworkService {
	/// Starts IO event loop
-	pub fn new(config: NetworkConfiguration) -> Result<NetworkService<Message>, UtilError> {
+	pub fn new(config: NetworkConfiguration) -> Result<NetworkService, UtilError> {
		let panic_handler = PanicHandler::new_in_arc();
-		let io_service = try!(IoService::<NetworkIoMessage<Message>>::start());
+		let io_service = try!(IoService::<NetworkIoMessage>::start());
		panic_handler.forward_from(&io_service);

		let stats = Arc::new(NetworkStats::new());
-		let host_info = Host::<Message>::client_version();
+		let host_info = Host::client_version();
		Ok(NetworkService {
			io_service: io_service,
			host_info: host_info,

@@ -55,7 +55,7 @@ impl<Message> NetworkService<Message> where Message: Send + Sync + Clone + 'stat
	}

	/// Regiter a new protocol handler with the event loop.
-	pub fn register_protocol(&self, handler: Arc<NetworkProtocolHandler<Message>+Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> {
+	pub fn register_protocol(&self, handler: Arc<NetworkProtocolHandler + Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> {
		try!(self.io_service.send_message(NetworkIoMessage::AddHandler {
			handler: handler,
			protocol: protocol,

@@ -70,7 +70,7 @@ impl<Message> NetworkService<Message> where Message: Send + Sync + Clone + 'stat
	}

	/// Returns underlying io service.
-	pub fn io(&self) -> &IoService<NetworkIoMessage<Message>> {
+	pub fn io(&self) -> &IoService<NetworkIoMessage> {
		&self.io_service
	}

@@ -146,9 +146,18 @@ impl<Message> NetworkService<Message> where Message: Send + Sync + Clone + 'stat
			host.set_non_reserved_mode(mode, &io_ctxt);
		}
	}
+
+	/// Executes action in the network context
+	pub fn with_context<F>(&self, protocol: ProtocolId, action: F) where F: Fn(&NetworkContext) {
+		let io = IoContext::new(self.io_service.channel(), 0);
+		let host = self.host.unwrapped_read();
+		if let Some(ref host) = host.as_ref() {
+			host.with_context(protocol, &io, action);
+		};
+	}
}

-impl<Message> MayPanic for NetworkService<Message> where Message: Send + Sync + Clone + 'static {
+impl MayPanic for NetworkService {
	fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
		self.panic_handler.on_panic(closure);
	}
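`NetworkService::with_context` is the public replacement for the removed user-message broadcast path: instead of posting a `Message` through the IO service, callers now borrow a `NetworkContext` directly. A rough usage sketch, assuming a service with a protocol registered as "myproto"; the function name is hypothetical:

// Sketch only: exercise a registered protocol from outside the IO event loop.
fn poke_protocol(service: &NetworkService) {
	service.with_context("myproto", |_ctx: &NetworkContext| {
		// e.g. inspect peers or send packets for the "myproto" protocol here.
	});
}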
@@ -30,22 +30,17 @@ pub struct TestProtocol {
	pub got_disconnect: AtomicBool,
}

-#[derive(Clone)]
-pub struct TestProtocolMessage {
-	payload: u32,
-}
-
impl TestProtocol {
	pub fn new(drop_session: bool) -> Self {
		TestProtocol {
			packet: Mutex::new(Vec::new()),
			got_timeout: AtomicBool::new(false),
			got_disconnect: AtomicBool::new(false),
			drop_session: drop_session,
		}
	}
	/// Creates and register protocol with the network service
-	pub fn register(service: &mut NetworkService<TestProtocolMessage>, drop_session: bool) -> Arc<TestProtocol> {
+	pub fn register(service: &mut NetworkService, drop_session: bool) -> Arc<TestProtocol> {
		let handler = Arc::new(TestProtocol::new(drop_session));
		service.register_protocol(handler.clone(), "test", &[42u8, 43u8]).expect("Error registering test protocol handler");
		handler

@@ -64,17 +59,17 @@ impl TestProtocol {
	}
}

-impl NetworkProtocolHandler<TestProtocolMessage> for TestProtocol {
-	fn initialize(&self, io: &NetworkContext<TestProtocolMessage>) {
+impl NetworkProtocolHandler for TestProtocol {
+	fn initialize(&self, io: &NetworkContext) {
		io.register_timer(0, 10).unwrap();
	}

-	fn read(&self, _io: &NetworkContext<TestProtocolMessage>, _peer: &PeerId, packet_id: u8, data: &[u8]) {
+	fn read(&self, _io: &NetworkContext, _peer: &PeerId, packet_id: u8, data: &[u8]) {
		assert_eq!(packet_id, 33);
		self.packet.locked().extend(data);
	}

-	fn connected(&self, io: &NetworkContext<TestProtocolMessage>, peer: &PeerId) {
+	fn connected(&self, io: &NetworkContext, peer: &PeerId) {
		assert!(io.peer_info(*peer).contains("Parity"));
		if self.drop_session {
			io.disconnect_peer(*peer)

@@ -83,13 +78,12 @@ impl NetworkProtocolHandler<TestProtocolMessage> for TestProtocol {
		}
	}

-	fn disconnected(&self, _io: &NetworkContext<TestProtocolMessage>, _peer: &PeerId) {
+	fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) {
		self.got_disconnect.store(true, AtomicOrdering::Relaxed);
	}

	/// Timer function called after a timeout created with `NetworkContext::timeout`.
-	fn timeout(&self, io: &NetworkContext<TestProtocolMessage>, timer: TimerToken) {
-		io.message(TestProtocolMessage { payload: 22 }).unwrap();
+	fn timeout(&self, _io: &NetworkContext, timer: TimerToken) {
		assert_eq!(timer, 0);
		self.got_timeout.store(true, AtomicOrdering::Relaxed);
	}

@@ -98,7 +92,7 @@ impl NetworkProtocolHandler<TestProtocolMessage> for TestProtocol {

#[test]
fn net_service() {
-	let service = NetworkService::<TestProtocolMessage>::new(NetworkConfiguration::new_local()).expect("Error creating network service");
+	let service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service");
	service.start().unwrap();
	service.register_protocol(Arc::new(TestProtocol::new(false)), "myproto", &[1u8]).unwrap();
}

@@ -110,13 +104,13 @@ fn net_connect() {
	let mut config1 = NetworkConfiguration::new_local();
	config1.use_secret = Some(key1.secret().clone());
	config1.boot_nodes = vec![ ];
-	let mut service1 = NetworkService::<TestProtocolMessage>::new(config1).unwrap();
+	let mut service1 = NetworkService::new(config1).unwrap();
	service1.start().unwrap();
	let handler1 = TestProtocol::register(&mut service1, false);
	let mut config2 = NetworkConfiguration::new_local();
	info!("net_connect: local URL: {}", service1.local_url().unwrap());
	config2.boot_nodes = vec![ service1.local_url().unwrap() ];
-	let mut service2 = NetworkService::<TestProtocolMessage>::new(config2).unwrap();
+	let mut service2 = NetworkService::new(config2).unwrap();
	service2.start().unwrap();
	let handler2 = TestProtocol::register(&mut service2, false);
	while !handler1.got_packet() && !handler2.got_packet() && (service1.stats().sessions() == 0 || service2.stats().sessions() == 0) {

@@ -129,7 +123,7 @@ fn net_connect() {
#[test]
fn net_start_stop() {
	let config = NetworkConfiguration::new_local();
-	let service = NetworkService::<TestProtocolMessage>::new(config).unwrap();
+	let service = NetworkService::new(config).unwrap();
	service.start().unwrap();
	service.stop().unwrap();
	service.start().unwrap();

@@ -141,12 +135,12 @@ fn net_disconnect() {
	let mut config1 = NetworkConfiguration::new_local();
	config1.use_secret = Some(key1.secret().clone());
	config1.boot_nodes = vec![ ];
-	let mut service1 = NetworkService::<TestProtocolMessage>::new(config1).unwrap();
+	let mut service1 = NetworkService::new(config1).unwrap();
	service1.start().unwrap();
	let handler1 = TestProtocol::register(&mut service1, false);
	let mut config2 = NetworkConfiguration::new_local();
	config2.boot_nodes = vec![ service1.local_url().unwrap() ];
-	let mut service2 = NetworkService::<TestProtocolMessage>::new(config2).unwrap();
+	let mut service2 = NetworkService::new(config2).unwrap();
	service2.start().unwrap();
	let handler2 = TestProtocol::register(&mut service2, true);
	while !(handler1.got_disconnect() && handler2.got_disconnect()) {

@@ -159,7 +153,7 @@ fn net_disconnect() {
#[test]
fn net_timeout() {
	let config = NetworkConfiguration::new_local();
-	let mut service = NetworkService::<TestProtocolMessage>::new(config).unwrap();
+	let mut service = NetworkService::new(config).unwrap();
	service.start().unwrap();
	let handler = TestProtocol::register(&mut service, false);
	while !handler.got_timeout() {
157 util/src/snappy.rs Normal file
@@ -0,0 +1,157 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Snappy compression bindings.

use std::fmt;
use libc::{c_char, c_int, size_t};

const SNAPPY_OK: c_int = 0;
const SNAPPY_INVALID_INPUT: c_int = 1;
const SNAPPY_BUFFER_TOO_SMALL: c_int = 2;

#[link(name = "snappy")]
extern {
	fn snappy_compress(
		input: *const c_char,
		input_len: size_t,
		compressed: *mut c_char,
		compressed_len: *mut size_t
	) -> c_int;

	fn snappy_max_compressed_length(source_len: size_t) -> size_t;

	fn snappy_uncompress(
		compressed: *const c_char,
		compressed_len: size_t,
		uncompressed: *mut c_char,
		uncompressed_len: *mut size_t,
	) -> c_int;

	fn snappy_uncompressed_length(
		compressed: *const c_char,
		compressed_len: size_t,
		result: *mut size_t,
	) -> c_int;

	fn snappy_validate_compressed_buffer(
		compressed: *const c_char,
		compressed_len: size_t,
	) -> c_int;
}

/// Attempted to decompress an uncompressed buffer.
#[derive(Debug)]
pub struct InvalidInput;

impl fmt::Display for InvalidInput {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "Attempted snappy decompression with invalid input")
	}
}

/// The maximum compressed length given a size.
pub fn max_compressed_len(len: usize) -> usize {
	unsafe { snappy_max_compressed_length(len as size_t) as usize }
}

/// How large the given data will be when decompressed.
pub fn decompressed_len(compressed: &[u8]) -> Result<usize, InvalidInput> {
	let mut size: size_t = 0;
	let len = compressed.len() as size_t;

	let status = unsafe { snappy_uncompressed_length(compressed.as_ptr() as *const c_char, len, &mut size) };

	if status == SNAPPY_INVALID_INPUT {
		Err(InvalidInput)
	} else {
		Ok(size)
	}
}

/// Compress a buffer using snappy.
pub fn compress(input: &[u8]) -> Vec<u8> {
	let mut buf = Vec::new();
	compress_into(input, &mut buf);
	buf
}

/// Compress a buffer using snappy, writing the result into
/// the given output buffer, growing it if necessary.
/// Otherwise, returns the length of the compressed data.
pub fn compress_into(input: &[u8], output: &mut Vec<u8>) -> usize {
	let mut len = max_compressed_len(input.len());

	if output.len() < len {
		output.resize(len, 0);
	}

	let status = unsafe {
		snappy_compress(
			input.as_ptr() as *const c_char,
			input.len() as size_t,
			output.as_mut_ptr() as *mut c_char,
			&mut len as &mut size_t,
		)
	};

	match status {
		SNAPPY_OK => len,
		SNAPPY_INVALID_INPUT => panic!("snappy compression has no concept of invalid input"),
		SNAPPY_BUFFER_TOO_SMALL => panic!("buffer cannot be too small, the capacity was just ensured."),
		_ => panic!("snappy returned unspecified status"),
	}
}

/// Decompress a buffer using snappy. Will return an error if the buffer is not snappy-compressed.
pub fn decompress(input: &[u8]) -> Result<Vec<u8>, InvalidInput> {
	let mut v = Vec::new();
	decompress_into(input, &mut v).map(|_| v)
}

/// Decompress a buffer using snappy, writing the result into
/// the given output buffer, growing it if necessary.
/// Will error if the input buffer is not snappy-compressed.
/// Otherwise, returns the length of the decompressed data.
pub fn decompress_into(input: &[u8], output: &mut Vec<u8>) -> Result<usize, InvalidInput> {
	let mut len = try!(decompressed_len(input));

	if output.len() < len {
		output.resize(len, 0);
	}

	let status = unsafe {
		snappy_uncompress(
			input.as_ptr() as *const c_char,
			input.len() as size_t,
			output.as_mut_ptr() as *mut c_char,
			&mut len as &mut size_t,
		)
	};

	match status {
		SNAPPY_OK => Ok(len as usize),
		SNAPPY_INVALID_INPUT => Err(InvalidInput),
		SNAPPY_BUFFER_TOO_SMALL => panic!("buffer cannot be too small, size was just set to large enough."),
		_ => panic!("snappy returned unspecified status"),
	}
}

/// Validate a compressed buffer. True if valid, false if not.
pub fn validate_compressed_buffer(input: &[u8]) -> bool {
	let status = unsafe { snappy_validate_compressed_buffer(input.as_ptr() as *const c_char, input.len() as size_t )};
	status == SNAPPY_OK
}
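The new module exposes a small safe wrapper over the C snappy API: size queries (`max_compressed_len`, `decompressed_len`), allocating and in-place variants of compression and decompression, and a validity check. A round-trip sketch built only on the functions above (the test name is illustrative):

// Round-trip sketch using the public functions from the new module.
#[test]
fn snappy_round_trip() {
	let input: Vec<u8> = b"some bytes to compress, some bytes to compress".to_vec();

	// compress_into reports how many bytes of `buf` are actually used;
	// the buffer itself is sized to max_compressed_len and may be larger.
	let mut buf = Vec::new();
	let compressed_len = compress_into(&input, &mut buf);
	let compressed = &buf[..compressed_len];

	assert!(validate_compressed_buffer(compressed));
	assert_eq!(decompressed_len(compressed).unwrap(), input.len());

	let decompressed = decompress(compressed).expect("round trip of valid snappy data");
	assert_eq!(decompressed, input);
}

Note that `compress` as written hands back the whole `max_compressed_len`-sized buffer rather than truncating it to the compressed length, which is why the sketch uses `compress_into` and slices to the returned length.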
@@ -195,7 +195,7 @@ fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStre
	}

	// take slices
-	let key: &Vec<u8> = &input[0].0;
+	let key: &[u8] = &input[0].0;
	let value: &[u8] = &input[0].1;

	// if the slice contains just one item, append the suffix of the key
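The only change in this hunk is the binding type: `&input[0].0` coerces from `&Vec<u8>` to `&[u8]`, so `key` and `value` now share the same slice type. The coercion is ordinary Rust deref behaviour, for example (standalone sketch, not from the commit):

// Deref coercion from &Vec<u8> to &[u8], as relied on by the hunk above.
fn first_key(input: &[(Vec<u8>, Vec<u8>)]) -> &[u8] {
	let key: &[u8] = &input[0].0; // &Vec<u8> coerces to &[u8]
	key
}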
@@ -17,23 +17,23 @@
//! Vector extensions.

/// Returns len of prefix shared with elem
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::vector::SharedPrefix;
///
/// fn main () {
/// 	let a = vec![1,2,3,3,5];
/// 	let b = vec![1,2,3];
/// 	assert_eq!(a.shared_prefix_len(&b), 3);
/// }
/// ```
-pub trait SharedPrefix <T> {
+pub trait SharedPrefix<T> {
	/// Get common prefix length
	fn shared_prefix_len(&self, elem: &[T]) -> usize;
}

-impl <T> SharedPrefix<T> for Vec<T> where T: Eq {
+impl<T> SharedPrefix<T> for [T] where T: Eq {
	fn shared_prefix_len(&self, elem: &[T]) -> usize {
		use std::cmp;
		let len = cmp::min(self.len(), elem.len());

@@ -58,7 +58,7 @@ mod test {
		let b = vec![1,2,3];
		assert_eq!(a.shared_prefix_len(&b), 3);
	}

	#[test]
	fn test_shared_prefix3() {
		let a = vec![1,2,3,4,5,6];
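Moving the impl from `Vec<T>` to `[T]` widens the API: `shared_prefix_len` is now available on any slice, and still on `Vec<T>` through deref. A small sketch of both call sites (not from the commit; it assumes `use util::vector::SharedPrefix;` as in the doc example above):

// shared_prefix_len is now callable on slices and, via deref, still on Vecs.
fn demo_shared_prefix() {
	let a = vec![1, 2, 3, 3, 5];
	let b = [1, 2, 3];
	assert_eq!(a.shared_prefix_len(&b), 3);      // Vec<T> autoderefs to [T]
	assert_eq!(a[..2].shared_prefix_len(&b), 2); // works on sub-slices too
}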